diff --git a/.github/workflows/build-extension.yml b/.github/workflows/build-extension.yml index 7c7dc5fb8b4f39..14998f24144b73 100644 --- a/.github/workflows/build-extension.yml +++ b/.github/workflows/build-extension.yml @@ -84,39 +84,39 @@ jobs: - name: Build broker run: | cd fs_brokers/apache_hdfs_broker/ && /bin/bash build.sh - build-docs: - name: Build Documents - needs: changes - if: ${{ needs.changes.outputs.docs_changes == 'true' }} - runs-on: ubuntu-latest - steps: - - name: Checkout ${{ github.ref }} - uses: actions/checkout@v3 + # build-docs: + # name: Build Documents + # needs: changes + # if: ${{ needs.changes.outputs.docs_changes == 'true' }} + # runs-on: ubuntu-latest + # steps: + # - name: Checkout ${{ github.ref }} + # uses: actions/checkout@v3 - - name: Build docs - run: | - cd docs && /bin/bash build_help_zip.sh - - name: Build - run: | - git clone https://github.com/apache/doris-website.git website - cd website - echo "[\"current\"]" > versions.json - mkdir -p docs - cp -R ../docs/en/docs/* docs/ - cp -R ../docs/sidebars.json sidebars.json - mkdir -p i18n/zh-CN/docusaurus-plugin-content-docs/current - cp -R ../docs/zh-CN/docs/* i18n/zh-CN/docusaurus-plugin-content-docs/current/ - cp -R ../docs/dev.json i18n/zh-CN/docusaurus-plugin-content-docs/current.json + # - name: Build docs + # run: | + # cd docs && /bin/bash build_help_zip.sh + # - name: Build + # run: | + # git clone https://github.com/apache/doris-website.git website + # cd website + # echo "[\"current\"]" > versions.json + # mkdir -p docs + # cp -R ../docs/en/docs/* docs/ + # cp -R ../docs/sidebars.json sidebars.json + # mkdir -p i18n/zh-CN/docusaurus-plugin-content-docs/current + # cp -R ../docs/zh-CN/docs/* i18n/zh-CN/docusaurus-plugin-content-docs/current/ + # cp -R ../docs/dev.json i18n/zh-CN/docusaurus-plugin-content-docs/current.json - mkdir -p community - cp -R ../docs/en/community/* community/ - mkdir -p i18n/zh-CN/docusaurus-plugin-content-docs-community/current/ - cp -R 
../docs/zh-CN/community/* i18n/zh-CN/docusaurus-plugin-content-docs-community/current/ - cp -R ../docs/sidebarsCommunity.json . + # mkdir -p community + # cp -R ../docs/en/community/* community/ + # mkdir -p i18n/zh-CN/docusaurus-plugin-content-docs-community/current/ + # cp -R ../docs/zh-CN/community/* i18n/zh-CN/docusaurus-plugin-content-docs-community/current/ + # cp -R ../docs/sidebarsCommunity.json . - cp -R ../docs/images static/ - npm install -g yarn - yarn cache clean - yarn && yarn build - cd ../ - rm -rf website + # cp -R ../docs/images static/ + # npm install -g yarn + # yarn cache clean + # yarn && yarn build + # cd ../ + # rm -rf website diff --git a/build.sh b/build.sh index 3406f76ae45efa..8b5514e0beed21 100755 --- a/build.sh +++ b/build.sh @@ -472,12 +472,13 @@ fi # Assesmble FE modules FE_MODULES='' +# TODO: docs are temporarily removed, so this var is always OFF +# Fix it later BUILD_DOCS='OFF' modules=("") if [[ "${BUILD_FE}" -eq 1 ]]; then modules+=("fe-common") modules+=("fe-core") - BUILD_DOCS='ON' fi if [[ "${BUILD_SPARK_DPP}" -eq 1 ]]; then modules+=("fe-common") @@ -692,7 +693,7 @@ if [[ "${BUILD_FE}" -eq 1 ]]; then rm -rf "${DORIS_OUTPUT}/fe/lib"/* cp -r -p "${DORIS_HOME}/fe/fe-core/target/lib"/* "${DORIS_OUTPUT}/fe/lib"/ cp -r -p "${DORIS_HOME}/fe/fe-core/target/doris-fe.jar" "${DORIS_OUTPUT}/fe/lib"/ - cp -r -p "${DORIS_HOME}/docs/build/help-resource.zip" "${DORIS_OUTPUT}/fe/lib"/ + #cp -r -p "${DORIS_HOME}/docs/build/help-resource.zip" "${DORIS_OUTPUT}/fe/lib"/ cp -r -p "${DORIS_HOME}/minidump" "${DORIS_OUTPUT}/fe"/ cp -r -p "${DORIS_HOME}/webroot/static" "${DORIS_OUTPUT}/fe/webroot"/ diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/any-value.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/any-value.md deleted file mode 100644 index 0f139905c8bf6c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/any-value.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "ANY_VALUE", 
- "language": "en" -} ---- - - - -## ANY_VALUE - - - -ANY_VALUE - - - - -### description -#### Syntax - -`ANY_VALUE(expr)` - -If there is a non NULL value in expr, any non NULL value is returned; otherwise, NULL is returned. - -Alias function: `ANY(expr)` - -### example -``` -mysql> select id, any_value(name) from cost2 group by id; -+------+-------------------+ -| id | any_value(`name`) | -+------+-------------------+ -| 3 | jack | -| 2 | jack | -+------+-------------------+ -``` -### keywords -ANY_VALUE, ANY diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/approx-count-distinct.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/approx-count-distinct.md deleted file mode 100644 index cad7c8110b8805..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/approx-count-distinct.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "APPROX_COUNT_DISTINCT", - "language": "en" -} ---- - - - -## APPROX_COUNT_DISTINCT -### Description -#### Syntax - -`APPROX_COUNT_DISTINCT (expr)` - - -Returns an approximate aggregation function similar to the result of COUNT (DISTINCT col). - -It combines COUNT and DISTINCT faster and uses fixed-size memory, so less memory can be used for columns with high cardinality. 
- -### example -``` -MySQL > select approx_count_distinct(query_id) from log_statis group by datetime; -+-----------------+ -| approx_count_distinct(`query_id`) | -+-----------------+ -| 17721 | -+-----------------+ -``` -### keywords - -APPROX_COUNT_DISTINCT diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/array-agg.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/array-agg.md deleted file mode 100644 index de61da28628eab..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/array-agg.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -{ - "title": "ARRAY_AGG", - "language": "en" -} ---- - - - -## ARRAY_AGG - -### description - -#### Syntax - -`ARRAY_AGG(col)` - -Concatenation of values in a column (including the null value) into an array can be used for multiple rows to one row (row to column). - -### notice - -- The order of the elements in an array is not guaranteed. -- Returns the array generated by the transformation. The element type in the array is the same as the col type. 
- -### example - -```sql -mysql> select * from test_doris_array_agg; - -+------+------+ - -| c1 | c2 | - -+------+------+ - -| 1 | a | - -| 1 | b | - -| 2 | c | - -| 2 | NULL | - -| 3 | NULL | - -+------+------+ - -mysql> select c1, array_agg(c2) from test_doris_array_agg group by c1; - -+------+-----------------+ - -| c1 | array_agg(`c2`) | - -+------+-----------------+ - -| 1 | ["a","b"] | - -| 2 | [NULL,"c"] | - -| 3 | [NULL] | - -+------+-----------------+ -``` - -### keywords - -ARRAY_AGG diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/avg-weighted.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/avg-weighted.md deleted file mode 100644 index b8805fc6c76ee4..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/avg-weighted.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "AVG_WEIGHTED", - "language": "en" -} ---- - - - - -## AVG_WEIGHTED -### Description -#### Syntax - -` double avg_weighted(x, weight)` - -Calculate the weighted arithmetic mean, which is the sum of the products of all corresponding values and weights, divided the total weight sum. -If the sum of all weights equals 0, NaN will be returned. 
- -### example - -``` -mysql> select avg_weighted(k2,k1) from baseall; -+--------------------------+ -| avg_weighted(`k2`, `k1`) | -+--------------------------+ -| 495.675 | -+--------------------------+ -1 row in set (0.02 sec) - -``` -### keywords - -AVG_WEIGHTED diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/avg.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/avg.md deleted file mode 100644 index 4fdf1f188525e3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/avg.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -{ - "title": "AVG", - "language": "en" -} ---- - - - - -## AVG -### Description -#### Syntax - -`AVG([DISTINCT] expr)` - - -Used to return the average value of the selected field - -Optional field DISTINCT parameters can be used to return the weighted average - -### example - -``` -mysql> SELECT datetime, AVG(cost_time) FROM log_statis group by datetime; -+---------------------+--------------------+ -| datetime | avg(`cost_time`) | -+---------------------+--------------------+ -| 2019-07-03 21:01:20 | 25.827794561933533 | -+---------------------+--------------------+ - -mysql> SELECT datetime, AVG(distinct cost_time) FROM log_statis group by datetime; -+---------------------+---------------------------+ -| datetime | avg(DISTINCT `cost_time`) | -+---------------------+---------------------------+ -| 2019-07-04 02:23:24 | 20.666666666666668 | -+---------------------+---------------------------+ - -``` -### keywords - -AVG diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/bitmap-agg.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/bitmap-agg.md deleted file mode 100644 index 260c7b6b855d6e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/bitmap-agg.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -{ - "title": "BITMAP_AGG", - "language": "en" -} ---- - - - -## BITMAP_AGG -### description -#### Syntax - -`BITMAP_AGG(expr)` - - -This 
aggregating function returns a bitmap that aggregates the values of expr, excluding any null values. -The type of expr needs to be TINYINT, SMALLINT, INT, or BIGINT. - -### example -``` -MySQL > select `n_nationkey`, `n_name`, `n_regionkey` from `nation`; -+-------------+----------------+-------------+ -| n_nationkey | n_name | n_regionkey | -+-------------+----------------+-------------+ -| 0 | ALGERIA | 0 | -| 1 | ARGENTINA | 1 | -| 2 | BRAZIL | 1 | -| 3 | CANADA | 1 | -| 4 | EGYPT | 4 | -| 5 | ETHIOPIA | 0 | -| 6 | FRANCE | 3 | -| 7 | GERMANY | 3 | -| 8 | INDIA | 2 | -| 9 | INDONESIA | 2 | -| 10 | IRAN | 4 | -| 11 | IRAQ | 4 | -| 12 | JAPAN | 2 | -| 13 | JORDAN | 4 | -| 14 | KENYA | 0 | -| 15 | MOROCCO | 0 | -| 16 | MOZAMBIQUE | 0 | -| 17 | PERU | 1 | -| 18 | CHINA | 2 | -| 19 | ROMANIA | 3 | -| 20 | SAUDI ARABIA | 4 | -| 21 | VIETNAM | 2 | -| 22 | RUSSIA | 3 | -| 23 | UNITED KINGDOM | 3 | -| 24 | UNITED STATES | 1 | -+-------------+----------------+-------------+ - -MySQL > select n_regionkey, bitmap_to_string(bitmap_agg(n_nationkey)) from nation group by n_regionkey; -+-------------+---------------------------------------------+ -| n_regionkey | bitmap_to_string(bitmap_agg(`n_nationkey`)) | -+-------------+---------------------------------------------+ -| 4 | 4,10,11,13,20 | -| 2 | 8,9,12,18,21 | -| 1 | 1,2,3,17,24 | -| 0 | 0,5,14,15,16 | -| 3 | 6,7,19,22,23 | -+-------------+---------------------------------------------+ - -MySQL > select bitmap_count(bitmap_agg(n_nationkey)) from nation; -+-----------------------------------------+ -| bitmap_count(bitmap_agg(`n_nationkey`)) | -+-----------------------------------------+ -| 25 | -+-----------------------------------------+ -``` -### keywords -BITMAP_AGG diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/bitmap-union.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/bitmap-union.md deleted file mode 100644 index 8afe22c5be5039..00000000000000 --- 
a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/bitmap-union.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -{ - "title": "BITMAP_UNION", - "language": "en" -} ---- - - - - -## BITMAP_UNION - -### description - -### example - -#### Create table - -The aggregation model needs to be used when creating the table. The data type is bitmap and the aggregation function is bitmap_union. -``` -CREATE TABLE `pv_bitmap` ( -  `dt` int (11) NULL COMMENT" ", -  `page` varchar (10) NULL COMMENT" ", -  `user_id` bitmap BITMAP_UNION NULL COMMENT" " -) ENGINE = OLAP -AGGREGATE KEY (`dt`,` page`) -COMMENT "OLAP" -DISTRIBUTED BY HASH (`dt`) BUCKETS 2; -``` - -Note: When the amount of data is large, it is best to create a corresponding rollup table for high-frequency bitmap_union queries - -``` -ALTER TABLE pv_bitmap ADD ROLLUP pv (page, user_id); -``` - -#### Data Load - -`TO_BITMAP (expr)`: Convert 0 ~ 18446744073709551615 unsigned bigint to bitmap - -`BITMAP_EMPTY ()`: Generate empty bitmap columns, used for insert or import to fill the default value - -`BITMAP_HASH (expr)` or `BITMAP_HASH64 (expr)`: Convert any type of column to a bitmap by hashing - -##### Stream Load - -``` -cat data | curl --location-trusted -u user: passwd -T--H "columns: dt, page, user_id, user_id = to_bitmap (user_id)" http: // host: 8410 / api / test / testDb / _stream_load -``` - -``` -cat data | curl --location-trusted -u user: passwd -T--H "columns: dt, page, user_id, user_id = bitmap_hash (user_id)" http: // host: 8410 / api / test / testDb / _stream_load -``` - -``` -cat data | curl --location-trusted -u user: passwd -T--H "columns: dt, page, user_id, user_id = bitmap_empty ()" http: // host: 8410 / api / test / testDb / _stream_load -``` - -##### Insert Into - -id2's column type is bitmap -``` -insert into bitmap_table1 select id, id2 from bitmap_table2; -``` - -id2's column type is bitmap -``` -INSERT INTO bitmap_table1 (id, id2) VALUES (1001, to_bitmap (1000)), (1001, to_bitmap (2000)); -``` - 
-id2's column type is bitmap -``` -insert into bitmap_table1 select id, bitmap_union (id2) from bitmap_table2 group by id; -``` - -id2's column type is int -``` -insert into bitmap_table1 select id, to_bitmap (id2) from table; -``` - -id2's column type is String -``` -insert into bitmap_table1 select id, bitmap_hash (id_string) from table; -``` - - -#### Data Query - -##### Syntax - - -`BITMAP_UNION (expr)`: Calculate the union of two Bitmaps. The return value is the new Bitmap value. - -`BITMAP_UNION_COUNT (expr)`: Calculate the cardinality of the union of two Bitmaps, equivalent to BITMAP_COUNT (BITMAP_UNION (expr)). It is recommended to use the BITMAP_UNION_COUNT function first, its performance is better than BITMAP_COUNT (BITMAP_UNION (expr)). - -`BITMAP_UNION_INT (expr)`: Count the number of different values ​​in columns of type TINYINT, SMALLINT and INT, return the sum of COUNT (DISTINCT expr) same - -`INTERSECT_COUNT (bitmap_column_to_count, filter_column, filter_values ​​...)`: The calculation satisfies -filter_column The cardinality of the intersection of multiple bitmaps of the filter. -bitmap_column_to_count is a column of type bitmap, filter_column is a column of varying dimensions, and filter_values ​​is a list of dimension values. 
- -##### Example - -The following SQL uses the pv_bitmap table above as an example: - -Calculate the deduplication value for user_id: - -``` -select bitmap_union_count (user_id) from pv_bitmap; - -select bitmap_count (bitmap_union (user_id)) from pv_bitmap; -``` - -Calculate the deduplication value of id: - -``` -select bitmap_union_int (id) from pv_bitmap; -``` - -Calculate the retention of user_id: - -``` -select intersect_count (user_id, page, 'meituan') as meituan_uv, -intersect_count (user_id, page, 'waimai') as waimai_uv, -intersect_count (user_id, page, 'meituan', 'waimai') as retention // Number of users appearing on both 'meituan' and 'waimai' pages -from pv_bitmap -where page in ('meituan', 'waimai'); -``` - -### keywords - -BITMAP, BITMAP_COUNT, BITMAP_EMPTY, BITMAP_UNION, BITMAP_UNION_INT, TO_BITMAP, BITMAP_UNION_COUNT, INTERSECT_COUNT diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/collect-list.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/collect-list.md deleted file mode 100644 index 3327be8d7db35a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/collect-list.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -{ - "title": "COLLECT_LIST", - "language": "en" -} ---- - - - -## COLLECT_LIST -### description -#### Syntax - -`ARRAY collect_list(expr)` - -Returns an array consisting of all values in expr within the group, and ,with the optional `max_size` parameter limits the size of the resulting array to `max_size` elements.The order of elements in the array is non-deterministic. NULL values are excluded. -It has an alias `group_array`. 
-### notice - -``` -Only supported in vectorized engine -``` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select k1,k2,k3 from collect_list_test order by k1; -+------+------------+-------+ -| k1 | k2 | k3 | -+------+------------+-------+ -| 1 | 2023-01-01 | hello | -| 2 | 2023-01-02 | NULL | -| 2 | 2023-01-02 | hello | -| 3 | NULL | world | -| 3 | 2023-01-02 | hello | -| 4 | 2023-01-02 | sql | -| 4 | 2023-01-03 | sql | -+------+------------+-------+ - -mysql> select collect_list(k1),collect_list(k1,3) from collect_list_test; -+-------------------------+--------------------------+ -| collect_list(`k1`) | collect_list(`k1`,3) | -+-------------------------+--------------------------+ -| [1,2,2,3,3,4,4] | [1,2,2] | -+-------------------------+--------------------------+ - -mysql> select k1,collect_list(k2),collect_list(k3,1) from collect_list_test group by k1 order by k1; -+------+-------------------------+--------------------------+ -| k1 | collect_list(`k2`) | collect_list(`k3`,1) | -+------+-------------------------+--------------------------+ -| 1 | [2023-01-01] | [hello] | -| 2 | [2023-01-02,2023-01-02] | [hello] | -| 3 | [2023-01-02] | [world] | -| 4 | [2023-01-02,2023-01-03] | [sql] | -+------+-------------------------+--------------------------+ - -``` - -### keywords -COLLECT_LIST,GROUP_ARRAY,COLLECT_SET,ARRAY diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/collect-set.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/collect-set.md deleted file mode 100644 index 42987df4e321e9..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/collect-set.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -{ - "title": "COLLECT_SET", - "language": "en" -} ---- - - - -## COLLECT_SET - - - -COLLECT_SET - - - -### description -#### Syntax - -`ARRAY collect_set(expr[,max_size])` - -Creates an array containing distinct elements from `expr`,with the optional `max_size` parameter limits the size of 
the resulting array to `max_size` elements. It has an alias `group_uniq_array`. - -### notice - -``` -Only supported in vectorized engine -``` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select k1,k2,k3 from collect_set_test order by k1; -+------+------------+-------+ -| k1 | k2 | k3 | -+------+------------+-------+ -| 1 | 2023-01-01 | hello | -| 2 | 2023-01-01 | NULL | -| 2 | 2023-01-02 | hello | -| 3 | NULL | world | -| 3 | 2023-01-02 | hello | -| 4 | 2023-01-02 | doris | -| 4 | 2023-01-03 | sql | -+------+------------+-------+ - -mysql> select collect_set(k1),collect_set(k1,2) from collect_set_test; -+-------------------------+--------------------------+ -| collect_set(`k1`) | collect_set(`k1`,2) | -+-------------------------+--------------------------+ -| [4,3,2,1] | [1,2] | -+----------------------------------------------------+ - -mysql> select k1,collect_set(k2),collect_set(k3,1) from collect_set_test group by k1 order by k1; -+------+-------------------------+--------------------------+ -| k1 | collect_set(`k2`) | collect_set(`k3`,1) | -+------+-------------------------+--------------------------+ -| 1 | [2023-01-01] | [hello] | -| 2 | [2023-01-01,2023-01-02] | [hello] | -| 3 | [2023-01-02] | [world] | -| 4 | [2023-01-02,2023-01-03] | [sql] | -+------+-------------------------+--------------------------+ - -``` - -### keywords -COLLECT_SET,GROUP_UNIQ_ARRAY,COLLECT_LIST,ARRAY \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/corr.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/corr.md deleted file mode 100644 index 862dbad02b17db..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/corr.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "CORR", - "language": "en" -} ---- - - - -## CORR -### Description -#### Syntax - -` double corr(x, y)` - -Calculate the Pearson correlation coefficient, which is returned as the covariance of x and y 
divided by the product of the standard deviations of x and y. -If the standard deviation of x or y is 0, the result will be 0. - -### example - -``` -mysql> select corr(x,y) from baseall; -+---------------------+ -| corr(x, y) | -+---------------------+ -| 0.89442719099991586 | -+---------------------+ -1 row in set (0.21 sec) - -``` -### keywords -CORR diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/count-by-enum.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/count-by-enum.md deleted file mode 100644 index 379661d0cf14e8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/count-by-enum.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -{ - "title": "COUNT_BY_ENUM", - "language": "en" -} ---- - - - -## COUNT_BY_ENUM - - - -COUNT_BY_ENUM - - - -### Description -#### Syntax - -`count_by_enum(expr1, expr2, ... , exprN);` - -Treats the data in a column as an enumeration and counts the number of values in each enumeration. Returns the number of enumerated values for each column, and the number of non-null values versus the number of null values. - -#### Arguments - -`expr1` — At least one input must be specified. The value is a column of type `STRING`. - -##### Returned value - -Returns a JSONArray string. - -For example: -```json -[{ - "cbe": { - "F": 100, - "M": 99 - }, - "notnull": 199, - "null": 1, - "all": 200 -}, { - "cbe": { - "20": 10, - "30": 5, - "35": 1 - }, - "notnull": 16, - "null": 184, - "all": 200 -}, { - "cbe": { - "China": 10, - "United States": 9, - "England": 20, - "Germany": 30 - }, - "notnull": 69, - "null": 131, - "all": 200 -}] -``` -Description: The return value is a JSON array string and the order of the internal objects is the order of the input parameters. -* cbe: count of non-null values based on enumeration values -* notnull: number of non-null values. -* null: number of null values -* all: total number, including both null and non-null values. 
- - -### example - -```sql -DROP TABLE IF EXISTS count_by_enum_test; - -CREATE TABLE count_by_enum_test( - `id` varchar(1024) NULL, - `f1` text REPLACE_IF_NOT_NULL NULL, - `f2` text REPLACE_IF_NOT_NULL NULL, - `f3` text REPLACE_IF_NOT_NULL NULL -) -AGGREGATE KEY(`id`) -DISTRIBUTED BY HASH(id) BUCKETS 3 -PROPERTIES ( - "replication_num" = "1" -); - -INSERT into count_by_enum_test (id, f1, f2, f3) values - (1, "F", "10", "China"), - (2, "F", "20", "China"), - (3, "M", NULL, "United States"), - (4, "M", NULL, "United States"), - (5, "M", NULL, "England"); - -SELECT * from count_by_enum_test; - -+------+------+------+---------------+ -| id | f1 | f2 | f3 | -+------+------+------+---------------+ -| 1 | F | 10 | China | -| 2 | F | 20 | China | -| 3 | M | NULL | United States | -| 4 | M | NULL | United States | -| 5 | M | NULL | England | -+------+------+------+---------------+ - -select count_by_enum(f1) from count_by_enum_test; - -+------------------------------------------------------+ -| count_by_enum(`f1`) | -+------------------------------------------------------+ -| [{"cbe":{"M":3,"F":2},"notnull":5,"null":0,"all":5}] | -+------------------------------------------------------+ - -select count_by_enum(f2) from count_by_enum_test; - -+--------------------------------------------------------+ -| count_by_enum(`f2`) | -+--------------------------------------------------------+ -| [{"cbe":{"10":1,"20":1},"notnull":2,"null":3,"all":5}] | -+--------------------------------------------------------+ - -select count_by_enum(f1,f2,f3) from count_by_enum_test; - -+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| count_by_enum(`f1`, `f2`, `f3`) | -+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| 
[{"cbe":{"M":3,"F":2},"notnull":5,"null":0,"all":5},{"cbe":{"20":1,"10":1},"notnull":2,"null":3,"all":5},{"cbe":{"England":1,"United States":2,"China":2},"notnull":5,"null":0,"all":5}] | -+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - -``` - -### keywords - -COUNT_BY_ENUM diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/count.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/count.md deleted file mode 100644 index 70c2be3f69db4d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/count.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "COUNT", - "language": "en" -} ---- - - - -## COUNT -### Description -#### Syntax - -`COUNT([DISTINCT] expr)` - - -Number of rows used to return the required rows - -### example - -``` -MySQL > select count(*) from log_statis group by datetime; -+----------+ -| count(*) | -+----------+ -| 28515903 | -+----------+ - -MySQL > select count(datetime) from log_statis group by datetime; -+-------------------+ -| count(`datetime`) | -+-------------------+ -| 28521682 | -+-------------------+ - -MySQL > select count(distinct datetime) from log_statis group by datetime; -+-------------------------------+ -| count(DISTINCT `datetime`) | -+-------------------------------+ -| 71045 | -+-------------------------------+ -``` -### keywords -COUNT diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/covar-samp.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/covar-samp.md deleted file mode 100644 index 0ce85588077cba..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/covar-samp.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "COVAR_SAMP", - "language": "en" -} ---- - - - -## COVAR_SAMP -### Description -#### Syntax - -` double covar_samp(x, y)` - -Calculate the sample covariance 
between x and y. - -### example - -``` -mysql> select covar_samp(x,y) from baseall; -+---------------------+ -| covar_samp(x, y) | -+---------------------+ -| 0.89442719099991586 | -+---------------------+ -1 row in set (0.21 sec) - -``` -### keywords -COVAR_SAMP diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/covar.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/covar.md deleted file mode 100644 index dfc5ca4f0f2c3c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/covar.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "COVAR,COVAR_POP", - "language": "en" -} ---- - - - -## COVAR,COVAR_POP -### Description -#### Syntax - -` double covar(x, y)` - -Calculate the covariance between x and y. - -### example - -``` -mysql> select covar(x,y) from baseall; -+---------------------+ -| covar(x, y) | -+---------------------+ -| 0.89442719099991586 | -+---------------------+ -1 row in set (0.21 sec) - -``` -### keywords -COVAR, COVAR_POP diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bit-and.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bit-and.md deleted file mode 100644 index e9dda3b46c7113..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bit-and.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "GROUP_BIT_AND", - "language": "en" -} ---- - - - -## group_bit_and -### description -#### Syntax - -`expr GROUP_BIT_AND(expr)` - -Perform an and calculation on expr, and return a new expr. 
-All ints are supported - -### example - -``` -mysql> select * from group_bit; -+-------+ -| value | -+-------+ -| 3 | -| 1 | -| 2 | -| 4 | -+-------+ -4 rows in set (0.02 sec) - -mysql> select group_bit_and(value) from group_bit; -+------------------------+ -| group_bit_and(`value`) | -+------------------------+ -| 0 | -+------------------------+ -``` - -### keywords - - GROUP_BIT_AND,BIT diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bit-or.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bit-or.md deleted file mode 100644 index 1ad5c96b53a1ff..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bit-or.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "GROUP_BIT_OR", - "language": "en" -} ---- - - - -## group_bit_or -### description -#### Syntax - -`expr GROUP_BIT_OR(expr)` - -Perform an or calculation on expr, and return a new expr. -All ints are supported - -### example - -``` -mysql> select * from group_bit; -+-------+ -| value | -+-------+ -| 3 | -| 1 | -| 2 | -| 4 | -+-------+ -4 rows in set (0.02 sec) - -mysql> select group_bit_or(value) from group_bit; -+-----------------------+ -| group_bit_or(`value`) | -+-----------------------+ -| 7 | -+-----------------------+ -``` - -### keywords - - GROUP_BIT_OR,BIT diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bit-xor.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bit-xor.md deleted file mode 100644 index 719c6a9e078ef6..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bit-xor.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "GROUP_BIT_XOR", - "language": "en" -} ---- - - - -## GROUP_BIT_XOR -### description -#### Syntax - -`expr GROUP_BIT_XOR(expr)` - -Perform an xor calculation on expr, and return a new expr. 
-All ints are supported - -### example - -``` -mysql> select * from group_bit; -+-------+ -| value | -+-------+ -| 3 | -| 1 | -| 2 | -| 4 | -+-------+ -4 rows in set (0.02 sec) - -mysql> select group_bit_xor(value) from group_bit; -+------------------------+ -| group_bit_xor(`value`) | -+------------------------+ -| 4 | -+------------------------+ -``` - -### keywords - - GROUP_BIT_XOR,BIT diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bitmap-xor.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bitmap-xor.md deleted file mode 100644 index b4eed3a00b9649..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-bitmap-xor.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "GROUP_BITMAP_XOR", - "language": "en" -} ---- - - - -## GROUP_BITMAP_XOR -### description -#### Syntax - -`BITMAP GROUP_BITMAP_XOR(expr)` - -Perform an xor calculation on expr, and return a new bitmap. - -### example - -``` -mysql> select page, bitmap_to_string(user_id) from pv_bitmap; -+------+-----------------------------+ -| page | bitmap_to_string(`user_id`) | -+------+-----------------------------+ -| m | 4,7,8 | -| m | 1,3,6,15 | -| m | 4,7 | -+------+-----------------------------+ - -mysql> select page, bitmap_to_string(group_bitmap_xor(user_id)) from pv_bitmap group by page; -+------+-----------------------------------------------+ -| page | bitmap_to_string(group_bitmap_xor(`user_id`)) | -+------+-----------------------------------------------+ -| m | 1,3,6,8,15 | -+------+-----------------------------------------------+ -``` - -### keywords - - GROUP_BITMAP_XOR,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-concat.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-concat.md deleted file mode 100644 index 85b825c985f596..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/group-concat.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -{ - 
"title": "GROUP_CONCAT", - "language": "en" -} ---- - - - -## GROUP_CONCAT -### description -#### Syntax - -`VARCHAR GROUP_CONCAT([DISTINCT] VARCHAR str[, VARCHAR sep] [ORDER BY { col_name | expr} [ASC | DESC]])` - - -This function is an aggregation function similar to sum (), and group_concat links multiple rows of results in the result set to a string. The second parameter, sep, is a connection symbol between strings, which can be omitted. This function usually needs to be used with group by statements. - - -Support Order By for sorting multi-row results, sorting and aggregation columns can be different. - -### example - -``` -mysql> select value from test; -+-------+ -| value | -+-------+ -| a | -| b | -| c | -| c | -+-------+ - -mysql> select GROUP_CONCAT(value) from test; -+-----------------------+ -| GROUP_CONCAT(`value`) | -+-----------------------+ -| a, b, c, c | -+-----------------------+ - -mysql> select GROUP_CONCAT(value, " ") from test; -+----------------------------+ -| GROUP_CONCAT(`value`, ' ') | -+----------------------------+ -| a b c c | -+----------------------------+ - -mysql> select GROUP_CONCAT(DISTINCT value) from test; -+-----------------------+ -| GROUP_CONCAT(`value`) | -+-----------------------+ -| a, b, c | -+-----------------------+ - -mysql> select GROUP_CONCAT(value, NULL) from test; -+----------------------------+ -| GROUP_CONCAT(`value`, NULL)| -+----------------------------+ -| NULL | -+----------------------------+ -``` - -### keywords -GROUP_CONCAT,GROUP,CONCAT diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/grouping-id.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/grouping-id.md deleted file mode 100755 index f32b7dc4be5d34..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/grouping-id.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -{ - "title": "GROUPING_ID", - "language": "en" -} ---- - - - -## GROUPING_ID - -### Name - -GROUPING_ID - -### Description - -Is a 
function that computes the level of grouping. `GROUPING_ID` can be used only in the `SELECT list`, `HAVING`, and `ORDER BY` clauses when `GROUP BY` is specified. - -`GROUPING` is used to distinguish the null values that are returned by `ROLLUP`, `CUBE` or `GROUPING SETS` from standard null values. The `NULL` returned as the result of a `ROLLUP`, `CUBE` or `GROUPING SETS` operation is a special use of `NULL`. This acts as a column placeholder in the result set and means all. - -```sql -GROUPING( ) -``` - -`` -Is a column or an expression that contains a column in a `GROUP BY` clause. - -Return Types: BIGINT - -### Example - -The following example groups `camp` and aggregates `occupation` amounts in the database. The `GROUPING` function is applied to the `camp` column. - -```sql -CREATE TABLE `roles` ( - role_id INT, - occupation VARCHAR(32), - camp VARCHAR(32), - register_time DATE -) -UNIQUE KEY(role_id) -DISTRIBUTED BY HASH(role_id) BUCKETS 1 -PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" -); - -INSERT INTO `roles` VALUES -(0, 'who am I', NULL, NULL), -(1, 'mage', 'alliance', '2018-12-03 16:11:28'), -(2, 'paladin', 'alliance', '2018-11-30 16:11:28'), -(3, 'rogue', 'horde', '2018-12-01 16:11:28'), -(4, 'priest', 'alliance', '2018-12-02 16:11:28'), -(5, 'shaman', 'horde', NULL), -(6, 'warrior', 'alliance', NULL), -(7, 'warlock', 'horde', '2018-12-04 16:11:28'), -(8, 'hunter', 'horde', NULL); - -SELECT - camp, - COUNT(occupation) AS 'occ_cnt', - GROUPING(camp) AS 'grouping' -FROM - `roles` -GROUP BY - ROLLUP(camp); -- CUBE(camp) and GROUPING SETS((camp)) also can work; -``` - -The result set shows two null value under `camp`. The first NULL is in the summary row added by the `ROLLUP` operation. The summary row shows the occupation counts for all `camp` groups and is indicated by 1 in the Grouping column. The second NULL represents the group of null values from this column in the table. - -Here is the result set. 
- -```log -+----------+---------+----------+ -| camp | occ_cnt | grouping | -+----------+---------+----------+ -| NULL | 9 | 1 | -| NULL | 1 | 0 | -| alliance | 4 | 0 | -| horde | 4 | 0 | -+----------+---------+----------+ -4 rows in set (0.01 sec) -``` - -### Keywords - -GROUPING - -### Best Practice - -See also [GROUPING_ID](./grouping_id.md) diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/histogram.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/histogram.md deleted file mode 100644 index f16039f7390f18..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/histogram.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -{ - "title": "HISTOGRAM", - "language": "en" -} ---- - - - -## HISTOGRAM -### description -#### Syntax - -`histogram(expr[, INT num_buckets])` - -The histogram function is used to describe the distribution of the data. It uses an "equal height" bucking strategy, and divides the data into buckets according to the value of the data. It describes each bucket with some simple data, such as the number of values that fall in the bucket. It is mainly used by the optimizer to estimate the range query. - -The result of the function returns an empty or Json string. - -Parameter description: -- num_buckets:Optional. Limit the number of histogram buckets. The default value is 128. 
- -Alias function: `hist(expr[, INT num_buckets])` - -### notice - -> Only supported in vectorized engine - -### example - -``` -MySQL [test]> SELECT histogram(c_float) FROM histogram_test; -+-------------------------------------------------------------------------------------------------------------------------------------+ -| histogram(`c_float`) | -+-------------------------------------------------------------------------------------------------------------------------------------+ -| {"num_buckets":3,"buckets":[{"lower":"0.1","upper":"0.1","count":1,"pre_sum":0,"ndv":1},...]} | -+-------------------------------------------------------------------------------------------------------------------------------------+ - -MySQL [test]> SELECT histogram(c_string, 2) FROM histogram_test; -+-------------------------------------------------------------------------------------------------------------------------------------+ -| histogram(`c_string`) | -+-------------------------------------------------------------------------------------------------------------------------------------+ -| {"num_buckets":2,"buckets":[{"lower":"str1","upper":"str7","count":4,"pre_sum":0,"ndv":3},...]} | -+-------------------------------------------------------------------------------------------------------------------------------------+ -``` - -Query result description: - -``` -{ - "num_buckets": 3, - "buckets": [ - { - "lower": "0.1", - "upper": "0.2", - "count": 2, - "pre_sum": 0, - "ndv": 2 - }, - { - "lower": "0.8", - "upper": "0.9", - "count": 2, - "pre_sum": 2, - "ndv": 2 - }, - { - "lower": "1.0", - "upper": "1.0", - "count": 2, - "pre_sum": 4, - "ndv": 1 - } - ] -} -``` - -Field description: -- num_buckets:The number of buckets -- buckets:All buckets - - lower:Upper bound of the bucket - - upper:Lower bound of the bucket - - count:The number of elements contained in the bucket - - pre_sum:The total number of elements in the front bucket - - ndv:The number of different values in the 
bucket - -> Total number of histogram elements = number of elements in the last bucket(count) + total number of elements in the previous bucket(pre_sum). - -### keywords - -HISTOGRAM, HIST diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/hll-union-agg.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/hll-union-agg.md deleted file mode 100644 index 49d6da2d682b08..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/hll-union-agg.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -{ - "title": "HLL_UNION_AGG", - "language": "en" -} ---- - - - -## HLL_UNION_AGG -### description -#### Syntax - -`HLL_UNION_AGG(hll)` - - -HLL is an engineering implementation based on HyperLog algorithm, which is used to save the intermediate results of HyperLog calculation process. - -It can only be used as the value column type of the table and reduce the amount of data through aggregation to achieve the purpose of speeding up the query. - -Based on this, we get an estimate with an error of about 1%. The HLL column is generated by other columns or data imported into the data. - -When importing, hll_hash function is used to specify which column in data is used to generate HLL column. It is often used to replace count distinct, and to calculate UV quickly in business by combining rollup. 
- -### example -``` -MySQL > select HLL_UNION_AGG(uv_set) from test_uv;; -+-------------------------+ -THE COURT OF JUSTICE OF THE EUROPEAN COMMUNITIES, -+-------------------------+ -| 17721 | -+-------------------------+ -``` -### keywords -HLL_UNION_AGG,HLL,UNION,AGG diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/map-agg.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/map-agg.md deleted file mode 100644 index b4c5ff616448ae..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/map-agg.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -{ - "title": "MAP_AGG", - "language": "en" -} ---- - - - -## MAP_AGG -### description -#### Syntax - -`MAP_AGG(expr1, expr2)` - - -Returns a map consists of expr1 as the key and expr2 as the corresponding value. - -### example -``` -MySQL > select `n_nationkey`, `n_name`, `n_regionkey` from `nation`; -+-------------+----------------+-------------+ -| n_nationkey | n_name | n_regionkey | -+-------------+----------------+-------------+ -| 0 | ALGERIA | 0 | -| 1 | ARGENTINA | 1 | -| 2 | BRAZIL | 1 | -| 3 | CANADA | 1 | -| 4 | EGYPT | 4 | -| 5 | ETHIOPIA | 0 | -| 6 | FRANCE | 3 | -| 7 | GERMANY | 3 | -| 8 | INDIA | 2 | -| 9 | INDONESIA | 2 | -| 10 | IRAN | 4 | -| 11 | IRAQ | 4 | -| 12 | JAPAN | 2 | -| 13 | JORDAN | 4 | -| 14 | KENYA | 0 | -| 15 | MOROCCO | 0 | -| 16 | MOZAMBIQUE | 0 | -| 17 | PERU | 1 | -| 18 | CHINA | 2 | -| 19 | ROMANIA | 3 | -| 20 | SAUDI ARABIA | 4 | -| 21 | VIETNAM | 2 | -| 22 | RUSSIA | 3 | -| 23 | UNITED KINGDOM | 3 | -| 24 | UNITED STATES | 1 | -+-------------+----------------+-------------+ - -MySQL > select `n_regionkey`, map_agg(`n_nationkey`, `n_name`) from `nation` group by `n_regionkey`; -+-------------+---------------------------------------------------------------------------+ -| n_regionkey | map_agg(`n_nationkey`, `n_name`) | -+-------------+---------------------------------------------------------------------------+ -| 1 | {1:"ARGENTINA", 
2:"BRAZIL", 3:"CANADA", 17:"PERU", 24:"UNITED STATES"} | -| 0 | {0:"ALGERIA", 5:"ETHIOPIA", 14:"KENYA", 15:"MOROCCO", 16:"MOZAMBIQUE"} | -| 3 | {6:"FRANCE", 7:"GERMANY", 19:"ROMANIA", 22:"RUSSIA", 23:"UNITED KINGDOM"} | -| 4 | {4:"EGYPT", 10:"IRAN", 11:"IRAQ", 13:"JORDAN", 20:"SAUDI ARABIA"} | -| 2 | {8:"INDIA", 9:"INDONESIA", 12:"JAPAN", 18:"CHINA", 21:"VIETNAM"} | -+-------------+---------------------------------------------------------------------------+ - -MySQL > select n_regionkey, map_agg(`n_name`, `n_nationkey` % 5) from `nation` group by `n_regionkey`; -+-------------+------------------------------------------------------------------------+ -| n_regionkey | map_agg(`n_name`, (`n_nationkey` % 5)) | -+-------------+------------------------------------------------------------------------+ -| 2 | {"INDIA":3, "INDONESIA":4, "JAPAN":2, "CHINA":3, "VIETNAM":1} | -| 0 | {"ALGERIA":0, "ETHIOPIA":0, "KENYA":4, "MOROCCO":0, "MOZAMBIQUE":1} | -| 3 | {"FRANCE":1, "GERMANY":2, "ROMANIA":4, "RUSSIA":2, "UNITED KINGDOM":3} | -| 1 | {"ARGENTINA":1, "BRAZIL":2, "CANADA":3, "PERU":2, "UNITED STATES":4} | -| 4 | {"EGYPT":4, "IRAN":0, "IRAQ":1, "JORDAN":3, "SAUDI ARABIA":0} | -+-------------+------------------------------------------------------------------------+ -``` -### keywords -MAP_AGG diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/max-by.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/max-by.md deleted file mode 100644 index df3096287e92bd..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/max-by.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "MAX_BY", - "language": "en" -} ---- - - - -## MAX_BY -### description -#### Syntax - -`MAX_BY(expr1, expr2)` - - -Returns the value of an expr1 associated with the maximum value of expr2 in a group. 
- -### example -``` -MySQL > select * from tbl; -+------+------+------+------+ -| k1 | k2 | k3 | k4 | -+------+------+------+------+ -| 0 | 3 | 2 | 100 | -| 1 | 2 | 3 | 4 | -| 4 | 3 | 2 | 1 | -| 3 | 4 | 2 | 1 | -+------+------+------+------+ - -MySQL > select max_by(k1, k4) from tbl; -+--------------------+ -| max_by(`k1`, `k4`) | -+--------------------+ -| 0 | -+--------------------+ -``` -### keywords -MAX_BY diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/max.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/max.md deleted file mode 100644 index 68dd6183217c57..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/max.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ - "title": "MAX", - "language": "en" -} ---- - - - -## MAX -### description -#### Syntax - -`MAX(expr)` - - -Returns the maximum value of an expr expression - -### example -``` -MySQL > select max(scan_rows) from log_statis group by datetime; -+------------------+ -| max(`scan_rows`) | -+------------------+ -| 4671587 | -+------------------+ -``` -### keywords -MAX diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/min-by.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/min-by.md deleted file mode 100644 index 52373088a4382b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/min-by.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "MIN_BY", - "language": "en" -} ---- - - - -## MIN_BY -### description -#### Syntax - -`MIN_BY(expr1, expr2)` - - -Returns the value of an expr1 associated with the minimum value of expr2 in a group. 
- -### example -``` -MySQL > select * from tbl; -+------+------+------+------+ -| k1 | k2 | k3 | k4 | -+------+------+------+------+ -| 0 | 3 | 2 | 100 | -| 1 | 2 | 3 | 4 | -| 4 | 3 | 2 | 1 | -| 3 | 4 | 2 | 1 | -+------+------+------+------+ - -MySQL > select min_by(k1, k4) from tbl; -+--------------------+ -| min_by(`k1`, `k4`) | -+--------------------+ -| 4 | -+--------------------+ -``` -### keywords -MIN_BY diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/min.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/min.md deleted file mode 100644 index 76915e2f1f1993..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/min.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ - "title": "MIN", - "language": "en" -} ---- - - - -## MIN -### Description -#### Syntax - -`MIN(expr)` - - -Returns the minimum value of an expr expression - -### example -``` -MySQL > select min(scan_rows) from log_statis group by datetime; -+------------------+ -| min(`scan_rows`) | -+------------------+ -| 0 | -+------------------+ -``` -### keywords -MIN diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/percentile-approx.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/percentile-approx.md deleted file mode 100644 index 627ebc09797a15..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/percentile-approx.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "PERCENTILE_APPROX", - "language": "en" -} ---- - - - -## PERCENTILE_APPROX -### Description -#### Syntax - -`PERCENTILE_APPROX(expr, DOUBLE p[, DOUBLE compression])` - -Return the approximation of the point p, where the value of P is between 0 and 1. - -Compression param is optional and can be setted to a value in the range of [2048, 10000]. The bigger compression you set, the more precise result and more time cost you will get. 
If it is not setted or not setted in the correct range, PERCENTILE_APPROX function will run with a default compression param of 10000. - -This function uses fixed size memory, so less memory can be used for columns with high cardinality, and can be used to calculate statistics such as tp99. - -### example -``` -MySQL > select `table`, percentile_approx(cost_time,0.99) from log_statis group by `table`; -+---------------------+---------------------------+ -| table | percentile_approx(`cost_time`, 0.99) | -+----------+--------------------------------------+ -| test | 54.22 | -+----------+--------------------------------------+ - -MySQL > select `table`, percentile_approx(cost_time,0.99, 4096) from log_statis group by `table`; -+---------------------+---------------------------+ -| table | percentile_approx(`cost_time`, 0.99, 4096.0) | -+----------+--------------------------------------+ -| test | 54.21 | -+----------+--------------------------------------+ -``` -### keywords -PERCENTILE_APPROX,PERCENTILE,APPROX diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/percentile-array.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/percentile-array.md deleted file mode 100644 index 6e23573721c81a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/percentile-array.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -{ - "title": "PERCENTILE_ARRAY", - "language": "en" -} ---- - - - -## PERCENTILE_ARRAY -### Description -#### Syntax - -`ARRAY_DOUBLE PERCENTILE_ARRAY(BIGINT, ARRAY_DOUBLE p)` - -Calculate exact percentiles, suitable for small data volumes. Sorts the specified column in descending order first, then takes the exact pth percentile. -The return value is the result of sequentially taking the specified percentages in the array p. -Parameter Description: -expr: Required. Columns whose values are of type integer (up to bigint). -p: The exact percentile is required, an array of constants, taking the value [0.0, 1.0]. 
- -### example -``` -mysql> select percentile_array(k1,[0.3,0.5,0.9]) from baseall; -+----------------------------------------------+ -| percentile_array(`k1`, ARRAY(0.3, 0.5, 0.9)) | -+----------------------------------------------+ -| [5.2, 8, 13.6] | -+----------------------------------------------+ -1 row in set (0.02 sec) - -``` - -### keywords -PERCENTILE_ARRAY diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/percentile.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/percentile.md deleted file mode 100644 index 4d5cc411fdc536..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/percentile.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -{ - "title": "PERCENTILE", - "language": "en" -} ---- - - - -## PERCENTILE -### Description -#### Syntax - -`PERCENTILE(expr, DOUBLE p)` - -Calculate the exact percentile, suitable for small data volumes. Sort the specified column in descending order first, and then take the exact p-th percentile. The value of p is between 0 and 1 - -Parameter Description: -expr: required. The value is an integer (bigint at most). -p: The exact percentile is required. 
The const value is [0.0,1.0] - -### example -``` -MySQL > select `table`, percentile(cost_time,0.99) from log_statis group by `table`; -+---------------------+---------------------------+ -| table | percentile(`cost_time`, 0.99)| -+----------+--------------------------------------+ -| test | 54.22 | -+----------+--------------------------------------+ - -MySQL > select percentile(NULL,0.3) from table1; -+-----------------------+ -| percentile(NULL, 0.3) | -+-----------------------+ -| NULL | -+-----------------------+ - -``` - -### keywords -PERCENTILE diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/retention.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/retention.md deleted file mode 100644 index 2c6fbd865c3a35..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/retention.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -{ - "title": "RETENTION", - "language": "en" -} ---- - - - -## RETENTION - - - -RETENTION - - - -### Description -#### Syntax - -`retention(event1, event2, ... , eventN);` - -The `retention` function takes as arguments a set of conditions from 1 to 32 arguments of type `UInt8` that indicate whether a certain condition was met for the event. Any condition can be specified as an argument. - -The conditions, except the first, apply in pairs: the result of the second will be true if the first and second are true, of the third if the first and third are true, etc. - -To put it simply, the first digit of the return value array indicates whether `event1` is true or false, the second digit represents the truth and falseness of `event1` and `event2`, and the third digit represents whether `event1` is true or false and `event3` is true False and, and so on. If `event1` is false, return an array full of zeros. - -#### Arguments - -`event` — An expression that returns a `UInt8` result (1 or 0). 
- -##### Returned value - -An array of 1s and 0s with a maximum length of 32 bits, the final output array has the same length as the input parameter. - -1 — Condition was met for the event. - -0 — Condition wasn’t met for the event. - -### example - -```sql -DROP TABLE IF EXISTS retention_test; - -CREATE TABLE retention_test( - `uid` int COMMENT 'user id', - `date` datetime COMMENT 'date time' - ) -DUPLICATE KEY(uid) -DISTRIBUTED BY HASH(uid) BUCKETS 3 -PROPERTIES ( - "replication_num" = "1" -); - -INSERT into retention_test (uid, date) values (0, '2022-10-12'), - (0, '2022-10-13'), - (0, '2022-10-14'), - (1, '2022-10-12'), - (1, '2022-10-13'), - (2, '2022-10-12'); - -SELECT * from retention_test; - -+------+---------------------+ -| uid | date | -+------+---------------------+ -| 0 | 2022-10-14 00:00:00 | -| 0 | 2022-10-13 00:00:00 | -| 0 | 2022-10-12 00:00:00 | -| 1 | 2022-10-13 00:00:00 | -| 1 | 2022-10-12 00:00:00 | -| 2 | 2022-10-12 00:00:00 | -+------+---------------------+ - -SELECT - uid, - retention(date = '2022-10-12') - AS r - FROM retention_test - GROUP BY uid - ORDER BY uid ASC; - -+------+------+ -| uid | r | -+------+------+ -| 0 | [1] | -| 1 | [1] | -| 2 | [1] | -+------+------+ - -SELECT - uid, - retention(date = '2022-10-12', date = '2022-10-13') - AS r - FROM retention_test - GROUP BY uid - ORDER BY uid ASC; - -+------+--------+ -| uid | r | -+------+--------+ -| 0 | [1, 1] | -| 1 | [1, 1] | -| 2 | [1, 0] | -+------+--------+ - -SELECT - uid, - retention(date = '2022-10-12', date = '2022-10-13', date = '2022-10-14') - AS r - FROM retention_test - GROUP BY uid - ORDER BY uid ASC; - -+------+-----------+ -| uid | r | -+------+-----------+ -| 0 | [1, 1, 1] | -| 1 | [1, 1, 0] | -| 2 | [1, 0, 0] | -+------+-----------+ - -``` - -### keywords - -RETENTION diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/sequence-count.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/sequence-count.md deleted file mode 100644 index 
68abb404dcaad7..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/sequence-count.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -{ - "title": "SEQUENCE-COUNT", - "language": "en" -} ---- - - - -## SEQUENCE-COUNT -### Description -#### Syntax - -`sequence_count(pattern, timestamp, cond1, cond2, ...);` - -Counts the number of event chains that matched the pattern. The function searches event chains that do not overlap. It starts to search for the next chain after the current chain is matched. - -**WARNING!** - -Events that occur at the same second may lay in the sequence in an undefined order affecting the result. - -#### Arguments - -`pattern` — Pattern string. - -**Pattern syntax** - -`(?N)` — Matches the condition argument at position N. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter. - -`.*` — Matches any number of events. You do not need conditional arguments to count this element of the pattern. - -`(?t operator value)` — Sets the time in seconds that should separate two events. - -We define `t` as the difference in seconds between two times, For example, pattern `(?1)(?t>1800)(?2)` matches events that occur more than 1800 seconds from each other. pattern `(?1)(?t>10000)(?2)` matches events that occur more than 10000 seconds from each other. An arbitrary number of any events can lay between these events. You can use the `>=`, `>`, `<`, `<=`, `==` operators. - -`timestamp` — Column considered to contain time data. Typical data types are `Date` and `DateTime`. You can also use any of the supported UInt data types. - -`cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. You can pass up to 32 condition arguments. The function takes only the events described in these conditions into account. If the sequence contains data that isn’t described in a condition, the function skips them. 
- -#### Returned value - -Number of non-overlapping event chains that are matched. - -### example - -**count examples** - -```sql -DROP TABLE IF EXISTS sequence_count_test2; - -CREATE TABLE sequence_count_test2( - `uid` int COMMENT 'user id', - `date` datetime COMMENT 'date time', - `number` int NULL COMMENT 'number' - ) -DUPLICATE KEY(uid) -DISTRIBUTED BY HASH(uid) BUCKETS 3 -PROPERTIES ( - "replication_num" = "1" -); - -INSERT INTO sequence_count_test2(uid, date, number) values (1, '2022-11-02 10:41:00', 1), - (2, '2022-11-02 13:28:02', 2), - (3, '2022-11-02 16:15:01', 1), - (4, '2022-11-02 19:05:04', 2), - (5, '2022-11-02 20:08:44', 3); - -SELECT * FROM sequence_count_test2 ORDER BY date; - -+------+---------------------+--------+ -| uid | date | number | -+------+---------------------+--------+ -| 1 | 2022-11-02 10:41:00 | 1 | -| 2 | 2022-11-02 13:28:02 | 2 | -| 3 | 2022-11-02 16:15:01 | 1 | -| 4 | 2022-11-02 19:05:04 | 2 | -| 5 | 2022-11-02 20:08:44 | 3 | -+------+---------------------+--------+ - -SELECT sequence_count('(?1)(?2)', date, number = 1, number = 3) FROM sequence_count_test2; - -+----------------------------------------------------------------+ -| sequence_count('(?1)(?2)', `date`, `number` = 1, `number` = 3) | -+----------------------------------------------------------------+ -| 1 | -+----------------------------------------------------------------+ - -SELECT sequence_count('(?1)(?2)', date, number = 1, number = 2) FROM sequence_count_test2; - -+----------------------------------------------------------------+ -| sequence_count('(?1)(?2)', `date`, `number` = 1, `number` = 2) | -+----------------------------------------------------------------+ -| 2 | -+----------------------------------------------------------------+ - -SELECT sequence_count('(?1)(?t>=10000)(?2)', date, number = 1, number = 2) FROM sequence_count_test1; - -+---------------------------------------------------------------------------+ -| sequence_count('(?1)(?t>=3600)(?2)', `date`, 
`number` = 1, `number` = 2) | -+---------------------------------------------------------------------------+ -| 2 | -+---------------------------------------------------------------------------+ -``` - -**not count examples** - -```sql -DROP TABLE IF EXISTS sequence_count_test1; - -CREATE TABLE sequence_count_test1( - `uid` int COMMENT 'user id', - `date` datetime COMMENT 'date time', - `number` int NULL COMMENT 'number' - ) -DUPLICATE KEY(uid) -DISTRIBUTED BY HASH(uid) BUCKETS 3 -PROPERTIES ( - "replication_num" = "1" -); - -INSERT INTO sequence_count_test1(uid, date, number) values (1, '2022-11-02 10:41:00', 1), - (2, '2022-11-02 11:41:00', 7), - (3, '2022-11-02 16:15:01', 3), - (4, '2022-11-02 19:05:04', 4), - (5, '2022-11-02 21:24:12', 5); - -SELECT * FROM sequence_count_test1 ORDER BY date; - -+------+---------------------+--------+ -| uid | date | number | -+------+---------------------+--------+ -| 1 | 2022-11-02 10:41:00 | 1 | -| 2 | 2022-11-02 11:41:00 | 7 | -| 3 | 2022-11-02 16:15:01 | 3 | -| 4 | 2022-11-02 19:05:04 | 4 | -| 5 | 2022-11-02 21:24:12 | 5 | -+------+---------------------+--------+ - -SELECT sequence_count('(?1)(?2)', date, number = 1, number = 2) FROM sequence_count_test1; - -+----------------------------------------------------------------+ -| sequence_count('(?1)(?2)', `date`, `number` = 1, `number` = 2) | -+----------------------------------------------------------------+ -| 0 | -+----------------------------------------------------------------+ - -SELECT sequence_count('(?1)(?2).*', date, number = 1, number = 2) FROM sequence_count_test1; - -+------------------------------------------------------------------+ -| sequence_count('(?1)(?2).*', `date`, `number` = 1, `number` = 2) | -+------------------------------------------------------------------+ -| 0 | -+------------------------------------------------------------------+ - -SELECT sequence_count('(?1)(?t>3600)(?2)', date, number = 1, number = 7) FROM sequence_count_test1; - 
-+--------------------------------------------------------------------------+ -| sequence_count('(?1)(?t>3600)(?2)', `date`, `number` = 1, `number` = 7) | -+--------------------------------------------------------------------------+ -| 0 | -+--------------------------------------------------------------------------+ -``` - -**special examples** - -```sql -DROP TABLE IF EXISTS sequence_count_test3; - -CREATE TABLE sequence_count_test3( - `uid` int COMMENT 'user id', - `date` datetime COMMENT 'date time', - `number` int NULL COMMENT 'number' - ) -DUPLICATE KEY(uid) -DISTRIBUTED BY HASH(uid) BUCKETS 3 -PROPERTIES ( - "replication_num" = "1" -); - -INSERT INTO sequence_count_test3(uid, date, number) values (1, '2022-11-02 10:41:00', 1), - (2, '2022-11-02 11:41:00', 7), - (3, '2022-11-02 16:15:01', 3), - (4, '2022-11-02 19:05:04', 4), - (5, '2022-11-02 21:24:12', 5); - -SELECT * FROM sequence_count_test3 ORDER BY date; - -+------+---------------------+--------+ -| uid | date | number | -+------+---------------------+--------+ -| 1 | 2022-11-02 10:41:00 | 1 | -| 2 | 2022-11-02 11:41:00 | 7 | -| 3 | 2022-11-02 16:15:01 | 3 | -| 4 | 2022-11-02 19:05:04 | 4 | -| 5 | 2022-11-02 21:24:12 | 5 | -+------+---------------------+--------+ -``` - -Perform the query: - -```sql -SELECT sequence_count('(?1)(?2)', date, number = 1, number = 5) FROM sequence_count_test3; - -+----------------------------------------------------------------+ -| sequence_count('(?1)(?2)', `date`, `number` = 1, `number` = 5) | -+----------------------------------------------------------------+ -| 1 | -+----------------------------------------------------------------+ -``` - -This is a very simple example. The function found the event chain where number 5 follows number 1. It skipped number 7,3,4 between them, because the number is not described as an event. If we want to take this number into account when searching for the event chain given in the example, we should make a condition for it. 
- -Now, perform this query: - -```sql -SELECT sequence_count('(?1)(?2)', date, number = 1, number = 5, number = 4) FROM sequence_count_test3; - -+------------------------------------------------------------------------------+ -| sequence_count('(?1)(?2)', `date`, `number` = 1, `number` = 5, `number` = 4) | -+------------------------------------------------------------------------------+ -| 0 | -+------------------------------------------------------------------------------+ -``` - -The result is kind of confusing. In this case, the function couldn’t find the event chain matching the pattern, because the event for number 4 occurred between 1 and 5. If in the same case we checked the condition for number 6, the sequence would count the pattern. - -```sql -SELECT sequence_count('(?1)(?2)', date, number = 1, number = 5, number = 6) FROM sequence_count_test3; - -+------------------------------------------------------------------------------+ -| sequence_count('(?1)(?2)', `date`, `number` = 1, `number` = 5, `number` = 6) | -+------------------------------------------------------------------------------+ -| 1 | -+------------------------------------------------------------------------------+ -``` - -### keywords - -SEQUENCE_COUNT \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/sequence-match.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/sequence-match.md deleted file mode 100644 index c088fc2e746f25..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/sequence-match.md +++ /dev/null @@ -1,256 +0,0 @@ ---- -{ - "title": "SEQUENCE_MATCH", - "language": "en" -} ---- - - - -## SEQUENCE-MATCH -### Description -#### Syntax - -`sequence_match(pattern, timestamp, cond1, cond2, ...);` - -Checks whether the sequence contains an event chain that matches the pattern. - -**WARNING!** - -Events that occur at the same second may lay in the sequence in an undefined order affecting the result. 
- -#### Arguments - -`pattern` — Pattern string. - -**Pattern syntax** - -`(?N)` — Matches the condition argument at position N. Conditions are numbered in the `[1, 32]` range. For example, `(?1)` matches the argument passed to the `cond1` parameter. - -`.*` — Matches any number of events. You do not need conditional arguments to match this element of the pattern. - -`(?t operator value)` — Sets the time in seconds that should separate two events. - -We define `t` as the difference in seconds between two times, For example, pattern `(?1)(?t>1800)(?2)` matches events that occur more than 1800 seconds from each other. pattern `(?1)(?t>10000)(?2)` matches events that occur more than 10000 seconds from each other. An arbitrary number of any events can lay between these events. You can use the `>=`, `>`, `<`, `<=`, `==` operators. - -`timestamp` — Column considered to contain time data. Typical data types are `Date` and `DateTime`. You can also use any of the supported UInt data types. - -`cond1`, `cond2` — Conditions that describe the chain of events. Data type: `UInt8`. You can pass up to 32 condition arguments. The function takes only the events described in these conditions into account. If the sequence contains data that isn’t described in a condition, the function skips them. - -#### Returned value - -1, if the pattern is matched. - -0, if the pattern isn’t matched. 
- -### example - -**match examples** - -```sql -DROP TABLE IF EXISTS sequence_match_test1; - -CREATE TABLE sequence_match_test1( - `uid` int COMMENT 'user id', - `date` datetime COMMENT 'date time', - `number` int NULL COMMENT 'number' - ) -DUPLICATE KEY(uid) -DISTRIBUTED BY HASH(uid) BUCKETS 3 -PROPERTIES ( - "replication_num" = "1" -); - -INSERT INTO sequence_match_test1(uid, date, number) values (1, '2022-11-02 10:41:00', 1), - (2, '2022-11-02 13:28:02', 2), - (3, '2022-11-02 16:15:01', 1), - (4, '2022-11-02 19:05:04', 2), - (5, '2022-11-02 20:08:44', 3); - -SELECT * FROM sequence_match_test1 ORDER BY date; - -+------+---------------------+--------+ -| uid | date | number | -+------+---------------------+--------+ -| 1 | 2022-11-02 10:41:00 | 1 | -| 2 | 2022-11-02 13:28:02 | 2 | -| 3 | 2022-11-02 16:15:01 | 1 | -| 4 | 2022-11-02 19:05:04 | 2 | -| 5 | 2022-11-02 20:08:44 | 3 | -+------+---------------------+--------+ - -SELECT sequence_match('(?1)(?2)', date, number = 1, number = 3) FROM sequence_match_test1; - -+----------------------------------------------------------------+ -| sequence_match('(?1)(?2)', `date`, `number` = 1, `number` = 3) | -+----------------------------------------------------------------+ -| 1 | -+----------------------------------------------------------------+ - -SELECT sequence_match('(?1)(?2)', date, number = 1, number = 2) FROM sequence_match_test1; - -+----------------------------------------------------------------+ -| sequence_match('(?1)(?2)', `date`, `number` = 1, `number` = 2) | -+----------------------------------------------------------------+ -| 1 | -+----------------------------------------------------------------+ - -SELECT sequence_match('(?1)(?t>=3600)(?2)', date, number = 1, number = 2) FROM sequence_match_test1; - -+---------------------------------------------------------------------------+ -| sequence_match('(?1)(?t>=3600)(?2)', `date`, `number` = 1, `number` = 2) | 
-+---------------------------------------------------------------------------+ -| 1 | -+---------------------------------------------------------------------------+ -``` - -**not match examples** - -```sql -DROP TABLE IF EXISTS sequence_match_test2; - -CREATE TABLE sequence_match_test2( - `uid` int COMMENT 'user id', - `date` datetime COMMENT 'date time', - `number` int NULL COMMENT 'number' - ) -DUPLICATE KEY(uid) -DISTRIBUTED BY HASH(uid) BUCKETS 3 -PROPERTIES ( - "replication_num" = "1" -); - -INSERT INTO sequence_match_test2(uid, date, number) values (1, '2022-11-02 10:41:00', 1), - (2, '2022-11-02 11:41:00', 7), - (3, '2022-11-02 16:15:01', 3), - (4, '2022-11-02 19:05:04', 4), - (5, '2022-11-02 21:24:12', 5); - -SELECT * FROM sequence_match_test2 ORDER BY date; - -+------+---------------------+--------+ -| uid | date | number | -+------+---------------------+--------+ -| 1 | 2022-11-02 10:41:00 | 1 | -| 2 | 2022-11-02 11:41:00 | 7 | -| 3 | 2022-11-02 16:15:01 | 3 | -| 4 | 2022-11-02 19:05:04 | 4 | -| 5 | 2022-11-02 21:24:12 | 5 | -+------+---------------------+--------+ - -SELECT sequence_match('(?1)(?2)', date, number = 1, number = 2) FROM sequence_match_test2; - -+----------------------------------------------------------------+ -| sequence_match('(?1)(?2)', `date`, `number` = 1, `number` = 2) | -+----------------------------------------------------------------+ -| 0 | -+----------------------------------------------------------------+ - -SELECT sequence_match('(?1)(?2).*', date, number = 1, number = 2) FROM sequence_match_test2; - -+------------------------------------------------------------------+ -| sequence_match('(?1)(?2).*', `date`, `number` = 1, `number` = 2) | -+------------------------------------------------------------------+ -| 0 | -+------------------------------------------------------------------+ - -SELECT sequence_match('(?1)(?t>3600)(?2)', date, number = 1, number = 7) FROM sequence_match_test2; - 
-+--------------------------------------------------------------------------+ -| sequence_match('(?1)(?t>3600)(?2)', `date`, `number` = 1, `number` = 7) | -+--------------------------------------------------------------------------+ -| 0 | -+--------------------------------------------------------------------------+ -``` - -**special examples** - -```sql -DROP TABLE IF EXISTS sequence_match_test3; - -CREATE TABLE sequence_match_test3( - `uid` int COMMENT 'user id', - `date` datetime COMMENT 'date time', - `number` int NULL COMMENT 'number' - ) -DUPLICATE KEY(uid) -DISTRIBUTED BY HASH(uid) BUCKETS 3 -PROPERTIES ( - "replication_num" = "1" -); - -INSERT INTO sequence_match_test3(uid, date, number) values (1, '2022-11-02 10:41:00', 1), - (2, '2022-11-02 11:41:00', 7), - (3, '2022-11-02 16:15:01', 3), - (4, '2022-11-02 19:05:04', 4), - (5, '2022-11-02 21:24:12', 5); - -SELECT * FROM sequence_match_test3 ORDER BY date; - -+------+---------------------+--------+ -| uid | date | number | -+------+---------------------+--------+ -| 1 | 2022-11-02 10:41:00 | 1 | -| 2 | 2022-11-02 11:41:00 | 7 | -| 3 | 2022-11-02 16:15:01 | 3 | -| 4 | 2022-11-02 19:05:04 | 4 | -| 5 | 2022-11-02 21:24:12 | 5 | -+------+---------------------+--------+ -``` - -Perform the query: - -```sql -SELECT sequence_match('(?1)(?2)', date, number = 1, number = 5) FROM sequence_match_test3; - -+----------------------------------------------------------------+ -| sequence_match('(?1)(?2)', `date`, `number` = 1, `number` = 5) | -+----------------------------------------------------------------+ -| 1 | -+----------------------------------------------------------------+ -``` - -This is a very simple example. The function found the event chain where number 5 follows number 1. It skipped number 7,3,4 between them, because the number is not described as an event. If we want to take this number into account when searching for the event chain given in the example, we should make a condition for it. 
- -Now, perform this query: - -```sql -SELECT sequence_match('(?1)(?2)', date, number = 1, number = 5, number = 4) FROM sequence_match_test3; - -+------------------------------------------------------------------------------+ -| sequence_match('(?1)(?2)', `date`, `number` = 1, `number` = 5, `number` = 4) | -+------------------------------------------------------------------------------+ -| 0 | -+------------------------------------------------------------------------------+ -``` - -The result is kind of confusing. In this case, the function couldn’t find the event chain matching the pattern, because the event for number 4 occurred between 1 and 5. If in the same case we checked the condition for number 6, the sequence would match the pattern. - -```sql -SELECT sequence_match('(?1)(?2)', date, number = 1, number = 5, number = 6) FROM sequence_match_test3; - -+------------------------------------------------------------------------------+ -| sequence_match('(?1)(?2)', `date`, `number` = 1, `number` = 5, `number` = 6) | -+------------------------------------------------------------------------------+ -| 1 | -+------------------------------------------------------------------------------+ -``` - -### keywords - -SEQUENCE_MATCH \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/stddev-samp.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/stddev-samp.md deleted file mode 100644 index 086c7b4d841135..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/stddev-samp.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ - "title": "STDDEV_SAMP", - "language": "en" -} ---- - - - -## STDDEV_SAMP -### Description -#### Syntax - -`STDDEV SAMP (expr)` - - -Returns the sample standard deviation of the expr expression - -### example -``` -MySQL > select stddev_samp(scan_rows) from log_statis group by datetime; -+--------------------------+ -| stddev_samp(`scan_rows`) | -+--------------------------+ -| 
2.372044195280762 | -+--------------------------+ -``` -### keywords -STDDEV SAMP,STDDEV,SAMP diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/stddev.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/stddev.md deleted file mode 100644 index d22a6d771792ae..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/stddev.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "STDDEV,STDDEV_POP", - "language": "en" -} ---- - - - -## STDDEV,STDDEV_POP -### Description -#### Syntax - -`stddev (expl)` - - -Returns the standard deviation of the expr expression - -### example -``` -MySQL > select stddev(scan_rows) from log_statis group by datetime; -+---------------------+ -| stddev(`scan_rows`) | -+---------------------+ -| 2.3736656687790934 | -+---------------------+ - -MySQL > select stddev_pop(scan_rows) from log_statis group by datetime; -+-------------------------+ -| stddev_pop(`scan_rows`) | -+-------------------------+ -| 2.3722760595994914 | -+-------------------------+ -``` -### keywords -STDDEV,STDDEV_POP,POP diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/sum.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/sum.md deleted file mode 100644 index 10802b4707ae39..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/sum.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ - "title": "SUM", - "language": "en" -} ---- - - - -## SUM -### Description -#### Syntax - -`Sum (Expr)` - - -Used to return the sum of all values of the selected field - -### example -``` -MySQL > select sum(scan_rows) from log_statis group by datetime; -+------------------+ -| sum(`scan_rows`) | -+------------------+ -| 8217360135 | -+------------------+ -``` -### keywords -SUM diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/topn-array.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/topn-array.md deleted file mode 100644 index 
0cb0b177a8b315..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/topn-array.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "TOPN_ARRAY", - "language": "en" -} ---- - - - -## TOPN_ARRAY -### description -#### Syntax - -`ARRAY topn_array(expr, INT top_num[, INT space_expand_rate])` - -The topn function uses the Space-Saving algorithm to calculate the top_num frequent items in expr, -and return an array about the top n nums, which is an approximation - -The space_expand_rate parameter is optional and is used to set the number of counters used in the Space-Saving algorithm -``` -counter numbers = top_num * space_expand_rate -``` -The higher value of space_expand_rate, the more accurate result will be. The default value is 50 - -### example -``` -mysql> select topn_array(k3,3) from baseall; -+--------------------------+ -| topn_array(`k3`, 3) | -+--------------------------+ -| [3021, 2147483647, 5014] | -+--------------------------+ -1 row in set (0.02 sec) - -mysql> select topn_array(k3,3,100) from baseall; -+--------------------------+ -| topn_array(`k3`, 3, 100) | -+--------------------------+ -| [3021, 2147483647, 5014] | -+--------------------------+ -1 row in set (0.02 sec) -``` -### keywords -TOPN, TOPN_ARRAY \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/topn-weighted.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/topn-weighted.md deleted file mode 100644 index 805ba0ac134ca7..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/topn-weighted.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "TOPN_WEIGHTED", - "language": "en" -} ---- - - - -## TOPN_WEIGHTED -### description -#### Syntax - -`ARRAY topn_weighted(expr, BigInt weight, INT top_num[, INT space_expand_rate])` - -The topn_weighted function is calculated using the Space-Saving algorithm, and the sum of the weights in expr is the result of the top n numbers, which is an 
approximate value - -The space_expand_rate parameter is optional and is used to set the number of counters used in the Space-Saving algorithm -``` -counter numbers = top_num * space_expand_rate -``` -The higher value of space_expand_rate, the more accurate result will be. The default value is 50 - -### example -``` -mysql> select topn_weighted(k5,k1,3) from baseall; -+------------------------------+ -| topn_weighted(`k5`, `k1`, 3) | -+------------------------------+ -| [0, 243.325, 100.001] | -+------------------------------+ -1 row in set (0.02 sec) - -mysql> select topn_weighted(k5,k1,3,100) from baseall; -+-----------------------------------+ -| topn_weighted(`k5`, `k1`, 3, 100) | -+-----------------------------------+ -| [0, 243.325, 100.001] | -+-----------------------------------+ -1 row in set (0.02 sec) -``` -### keywords -TOPN, TOPN_WEIGHTED \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/topn.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/topn.md deleted file mode 100644 index 8ca8840b445335..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/topn.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "TOPN", - "language": "en" -} ---- - - - -## TOPN -### description -#### Syntax - -`topn(expr, INT top_num[, INT space_expand_rate])` - -The topn function uses the Space-Saving algorithm to calculate the top_num frequent items in expr, and the result is the -frequent items and their occurrence times, which is an approximation - -The space_expand_rate parameter is optional and is used to set the number of counters used in the Space-Saving algorithm -``` -counter numbers = top_num * space_expand_rate -``` -The higher value of space_expand_rate, the more accurate result will be. 
The default value is 50 - -### example -``` -MySQL [test]> select topn(keyword,10) from keyword_table where date>= '2020-06-01' and date <= '2020-06-19' ; -+------------------------------------------------------------------------------------------------------------+ -| topn(`keyword`, 10) | -+------------------------------------------------------------------------------------------------------------+ -| a:157, b:138, c:133, d:133, e:131, f:127, g:124, h:122, i:117, k:117 | -+------------------------------------------------------------------------------------------------------------+ - -MySQL [test]> select date,topn(keyword,10,100) from keyword_table where date>= '2020-06-17' and date <= '2020-06-19' group by date; -+------------+-----------------------------------------------------------------------------------------------+ -| date | topn(`keyword`, 10, 100) | -+------------+-----------------------------------------------------------------------------------------------+ -| 2020-06-19 | a:11, b:8, c:8, d:7, e:7, f:7, g:7, h:7, i:7, j:7 | -| 2020-06-18 | a:10, b:8, c:7, f:7, g:7, i:7, k:7, l:7, m:6, d:6 | -| 2020-06-17 | a:9, b:8, c:8, j:8, d:7, e:7, f:7, h:7, i:7, k:7 | -+------------+-----------------------------------------------------------------------------------------------+ -``` -### keywords -TOPN \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/var-samp.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/var-samp.md deleted file mode 100644 index 8fe7ad5a39f7ec..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/var-samp.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ - "title": "VARIANCE_SAMP,VARIANCE_SAMP", - "language": "en" -} ---- - - - -## VARIANCE_SAMP,VARIANCE_SAMP -### Description -#### Syntax - -`VAR SAMP (expr)` - - -Returns the sample variance of the expr expression - -### example -``` -MySQL > select var_samp(scan_rows) from log_statis group by datetime; 
-+-----------------------+ -| var_samp(`scan_rows`) | -+-----------------------+ -| 5.6227132145741789 | -+-----------------------+ -``` -### keywords -VAR SAMP, VARIANCE SAMP,VAR,SAMP,VARIANCE diff --git a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/variance.md b/docs/en/docs/sql-manual/sql-functions/aggregate-functions/variance.md deleted file mode 100644 index f58da61bf708f7..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/aggregate-functions/variance.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "VARIANCE,VAR_POP,VARIANCE_POP", - "language": "en" -} ---- - - - -## VARIANCE,VAR_POP,VARIANCE_POP -### Description -#### Syntax - -`VARIANCE(expr)` - - -Returns the variance of the expr expression - -### example -``` -MySQL > select variance(scan_rows) from log_statis group by datetime; -+-----------------------+ -| variance(`scan_rows`) | -+-----------------------+ -| 5.6183332881176211 | -+-----------------------+ - -MySQL > select var_pop(scan_rows) from log_statis group by datetime; -+----------------------+ -| var_pop(`scan_rows`) | -+----------------------+ -| 5.6230744719006163 | -+----------------------+ -``` -### keywords -VARIANCE,VAR_POP,VARIANCE_POP,VAR,POP diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-apply.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-apply.md deleted file mode 100644 index b72625783ba46c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-apply.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -{ - "title": "ARRAY_APPLY", - "language": "en" -} ---- - - - -## array_apply - - - -array_apply - - - -### description - -Filter array to match specific binary condition - -#### Syntax - -```sql -array_apply(arr, op, val) -``` - -#### Arguments - -`arr` — The array to inspect. If it null, null will be returned. -`op` — The compare operation, op includes `=`, `>=`, `<=`, `>`, `<`, `!=`. Support const value only. 
-`val` — The compared value.If it null, null will be returned. Support const value only. - -#### Returned value - -The filtered array matched with condition. - -Type: Array. - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select array_apply([1, 2, 3, 4, 5], ">=", 2); -+--------------------------------------------+ -| array_apply(ARRAY(1, 2, 3, 4, 5), '>=', 2) | -+--------------------------------------------+ -| [2, 3, 4, 5] | -+--------------------------------------------+ -1 row in set (0.01 sec) - -mysql> select array_apply([1000000, 1000001, 1000002], "=", "1000002"); -+-------------------------------------------------------------+ -| array_apply(ARRAY(1000000, 1000001, 1000002), '=', 1000002) | -+-------------------------------------------------------------+ -| [1000002] | -+-------------------------------------------------------------+ -1 row in set (0.01 sec) -``` - -### keywords - -ARRAY,APPLY,ARRAY_APPLY \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-avg.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-avg.md deleted file mode 100644 index 49816cc2e7b444..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-avg.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "ARRAY_AVG", - "language": "en" -} ---- - - - -## array_avg - - - -array_avg - - - -### description -#### Syntax - -`Array array_avg(arr)` - -Get the average of all elements in an array (`NULL` values are skipped). -When the array is empty or all elements in the array are `NULL` values, the function returns `NULL`. 
- -### example - -```shell -mysql> create table array_type_table(k1 INT, k2 Array) duplicate key (k1) - -> distributed by hash(k1) buckets 1 properties('replication_num' = '1'); -mysql> insert into array_type_table values (0, []), (1, [NULL]), (2, [1, 2, 3]), (3, [1, NULL, 3]); -mysql> set enable_vectorized_engine = true; # enable vectorized engine -mysql> select k2, array_avg(k2) from array_type_table; -+--------------+-----------------+ -| k2 | array_avg(`k2`) | -+--------------+-----------------+ -| [] | NULL | -| [NULL] | NULL | -| [1, 2, 3] | 2 | -| [1, NULL, 3] | 2 | -+--------------+-----------------+ -4 rows in set (0.01 sec) - -``` - -### keywords - -ARRAY,AVG,ARRAY_AVG - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-compact.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-compact.md deleted file mode 100644 index ed2fca78d5bcef..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-compact.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -{ - "title": "ARRAY_COMPACT", - "language": "en" -} ---- - - - -## array_compact - - - -array_compact - - - -### description - -Removes consecutive duplicate elements from an array. The order of result values is determined by the order in the source array. - -#### Syntax - -`Array array_compact(arr)` - -#### Arguments - -`arr` — The array to inspect. - -#### Returned value - -The array without continuous duplicate. - -Type: Array. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -select array_compact([1, 2, 3, 3, null, null, 4, 4]); - -+----------------------------------------------------+ -| array_compact(ARRAY(1, 2, 3, 3, NULL, NULL, 4, 4)) | -+----------------------------------------------------+ -| [1, 2, 3, NULL, 4] | -+----------------------------------------------------+ - -select array_compact(['aaa','aaa','bbb','ccc','ccccc',null, null,'dddd']); - -+-------------------------------------------------------------------------------+ -| array_compact(ARRAY('aaa', 'aaa', 'bbb', 'ccc', 'ccccc', NULL, NULL, 'dddd')) | -+-------------------------------------------------------------------------------+ -| ['aaa', 'bbb', 'ccc', 'ccccc', NULL, 'dddd'] | -+-------------------------------------------------------------------------------+ - -select array_compact(['2015-03-13','2015-03-13']); - -+--------------------------------------------------+ -| array_compact(ARRAY('2015-03-13', '2015-03-13')) | -+--------------------------------------------------+ -| ['2015-03-13'] | -+--------------------------------------------------+ -``` - -### keywords - -ARRAY,COMPACT,ARRAY_COMPACT - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-concat.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-concat.md deleted file mode 100644 index 8e4987a9528ce2..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-concat.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -{ - "title": "ARRAY_CONCAT", - "language": "en" -} ---- - - - -## array_concat - - - -array_concat - - - -### description - -Concat all arrays passed in the arguments - -#### Syntax - -`Array array_concat(Array, ...)` - -#### Returned value - -The concated array. - -Type: Array. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select array_concat([1, 2], [7, 8], [5, 6]); -+-----------------------------------------------------+ -| array_concat(ARRAY(1, 2), ARRAY(7, 8), ARRAY(5, 6)) | -+-----------------------------------------------------+ -| [1, 2, 7, 8, 5, 6] | -+-----------------------------------------------------+ -1 row in set (0.02 sec) - -mysql> select col2, col3, array_concat(col2, col3) from array_test; -+--------------+-----------+------------------------------+ -| col2 | col3 | array_concat(`col2`, `col3`) | -+--------------+-----------+------------------------------+ -| [1, 2, 3] | [3, 4, 5] | [1, 2, 3, 3, 4, 5] | -| [1, NULL, 2] | [NULL] | [1, NULL, 2, NULL] | -| [1, 2, 3] | NULL | NULL | -| [] | [] | [] | -+--------------+-----------+------------------------------+ -``` - -### keywords - -ARRAY,CONCAT,ARRAY_CONCAT \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-contains.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-contains.md deleted file mode 100644 index 9d7c7374591b1d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-contains.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -{ - "title": "ARRAY_CONTAINS", - "language": "en" -} ---- - - - -## array_contains - - - -array_contains - - - -### description - -#### Syntax - -`BOOLEAN array_contains(ARRAY arr, T value)` - -Check if a value presents in an array column. 
Return below values: - -``` -1 - if value presents in an array; -0 - if value does not present in an array; -NULL - when array is NULL; -``` - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> SELECT id,c_array,array_contains(c_array, 5) FROM `array_test`; -+------+-----------------+------------------------------+ -| id | c_array | array_contains(`c_array`, 5) | -+------+-----------------+------------------------------+ -| 1 | [1, 2, 3, 4, 5] | 1 | -| 2 | [6, 7, 8] | 0 | -| 3 | [] | 0 | -| 4 | NULL | NULL | -+------+-----------------+------------------------------+ - -mysql> select array_contains([null, 1], null); -+--------------------------------------+ -| array_contains(ARRAY(NULL, 1), NULL) | -+--------------------------------------+ -| 1 | -+--------------------------------------+ -1 row in set (0.00 sec) -``` - -### keywords - -ARRAY,CONTAIN,CONTAINS,ARRAY_CONTAINS - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-count.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-count.md deleted file mode 100644 index 2df68d18e07f6c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-count.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -{ - "title": "ARRAY_COUNT", - "language": "en" -} ---- - - - -## array_count - - - -array_count - - - -### description - -```sql -array_count(lambda, array1, ...) -``` - - -Use lambda expressions as input parameters to perform corresponding expression calculations on the internal data of other input ARRAY parameters. -Returns the number of elements such that the return value of `lambda(array1[i], ...)` is not 0. Returns 0 if no element is found that satisfies this condition. - -There are one or more parameters are input in the lambda expression, which must be consistent with the number of input array columns later.The number of elements of all input arrays must be the same. 
Legal scalar functions can be executed in lambda, aggregate functions, etc. are not supported. - - -``` -array_count(x->x, array1); -array_count(x->(x%2 = 0), array1); -array_count(x->(abs(x)-1), array1); -array_count((x,y)->(x = y), array1, array2); -``` - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select array_count(x -> x, [0, 1, 2, 3]); -+--------------------------------------------------------+ -| array_count(array_map([x] -> x(0), ARRAY(0, 1, 2, 3))) | -+--------------------------------------------------------+ -| 3 | -+--------------------------------------------------------+ -1 row in set (0.00 sec) - -mysql> select array_count(x -> x > 2, [0, 1, 2, 3]); -+------------------------------------------------------------+ -| array_count(array_map([x] -> x(0) > 2, ARRAY(0, 1, 2, 3))) | -+------------------------------------------------------------+ -| 1 | -+------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql> select array_count(x -> x is null, [null, null, null, 1, 2]); -+----------------------------------------------------------------------------+ -| array_count(array_map([x] -> x(0) IS NULL, ARRAY(NULL, NULL, NULL, 1, 2))) | -+----------------------------------------------------------------------------+ -| 3 | -+----------------------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql> select array_count(x -> power(x,2)>10, [1, 2, 3, 4, 5]); -+------------------------------------------------------------------------------+ -| array_count(array_map([x] -> power(x(0), 2.0) > 10.0, ARRAY(1, 2, 3, 4, 5))) | -+------------------------------------------------------------------------------+ -| 2 | -+------------------------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql> select *, array_count((x, y) -> x>y, c_array1, c_array2) from array_test; 
-+------+-----------------+-------------------------+-----------------------------------------------------------------------+ -| id | c_array1 | c_array2 | array_count(array_map([x, y] -> x(0) > y(1), `c_array1`, `c_array2`)) | -+------+-----------------+-------------------------+-----------------------------------------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | 2 | -| 2 | [6, 7, 8] | [10, 12, 13] | 0 | -| 3 | [1] | [-100] | 1 | -| 4 | [1, NULL, 2] | [NULL, 3, 1] | 1 | -| 5 | [] | [] | 0 | -| 6 | NULL | NULL | 0 | -+------+-----------------+-------------------------+-----------------------------------------------------------------------+ -6 rows in set (0.02 sec) - -``` - -### keywords - -ARRAY, COUNT, ARRAY_COUNT - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-cum-sum.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-cum-sum.md deleted file mode 100644 index 9463982ae42632..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-cum-sum.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -{ - "title": "ARRAY_CUM_SUM", - "language": "en" -} ---- - - - -## array_cum_sum - - - -array_cum_sum - - - -### description - -Get the cumulative sum of an array (`NULL` values are skipped). -If the array contains `NULL` values, then `NULL` is set at the same position in the result array. 
- -#### Syntax - -```sql -Array array_cum_sum(Array) -``` - -### notice - -`Only supported in vectorized engine` - -### example - -```shell -mysql> create table array_type_table(k1 INT, k2 Array) duplicate key (k1) distributed by hash(k1) buckets 1 properties('replication_num' = '1'); -mysql> insert into array_type_table values (0, []), (1, [NULL]), (2, [1, 2, 3, 4]), (3, [1, NULL, 3, NULL, 5]); -mysql> set enable_vectorized_engine = true; # enable vectorized engine -mysql> select k2, array_cum_sum(k2) from array_type_table; -+-----------------------+-----------------------+ -| k2 | array_cum_sum(`k2`) | -+-----------------------+-----------------------+ -| [] | [] | -| [NULL] | [NULL] | -| [1, 2, 3, 4] | [1, 3, 6, 10] | -| [1, NULL, 3, NULL, 5] | [1, NULL, 4, NULL, 9] | -+-----------------------+-----------------------+ - -4 rows in set -Time: 0.122s -``` - -### keywords - -ARRAY,CUM_SUM,ARRAY_CUM_SUM \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-difference.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-difference.md deleted file mode 100644 index 9d6c9699f3af63..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-difference.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -{ - "title": "ARRAY_DIFFERENCE", - "language": "en" -} ---- - - - -## array_difference - - - -array_difference - - - -### description - -#### Syntax - -`ARRAY array_difference(ARRAY arr)` - -Calculates the difference between adjacent array elements. -Returns an array where the first element will be 0, the second is the difference between a[1] - a[0]. 
-need notice that NULL will be return NULL - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select *,array_difference(k2) from array_type_table; -+------+-----------------------------+---------------------------------+ -| k1 | k2 | array_difference(`k2`) | -+------+-----------------------------+---------------------------------+ -| 0 | [] | [] | -| 1 | [NULL] | [NULL] | -| 2 | [1, 2, 3] | [0, 1, 1] | -| 3 | [1, NULL, 3] | [0, NULL, NULL] | -| 4 | [0, 1, 2, 3, NULL, 4, 6] | [0, 1, 1, 1, NULL, NULL, 2] | -| 5 | [1, 2, 3, 4, 5, 4, 3, 2, 1] | [0, 1, 1, 1, 1, -1, -1, -1, -1] | -| 6 | [6, 7, 8] | [0, 1, 1] | -+------+-----------------------------+---------------------------------+ -``` - -### keywords - -ARRAY, DIFFERENCE, ARRAY_DIFFERENCE - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-distinct.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-distinct.md deleted file mode 100644 index 919a9eab14a942..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-distinct.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -{ - "title": "ARRAY_DISTINCT", - "language": "en" -} ---- - - - -## array_distinct - - - -array_distinct - - - -### description - -#### Syntax - -`ARRAY array_distinct(ARRAY arr)` - -Return the array which has been removed duplicate values. -Return NULL for NULL input. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select k1, k2, array_distinct(k2) from array_test; -+------+-----------------------------+---------------------------+ -| k1 | k2 | array_distinct(k2) | -+------+-----------------------------+---------------------------+ -| 1 | [1, 2, 3, 4, 5] | [1, 2, 3, 4, 5] | -| 2 | [6, 7, 8] | [6, 7, 8] | -| 3 | [] | [] | -| 4 | NULL | NULL | -| 5 | [1, 2, 3, 4, 5, 4, 3, 2, 1] | [1, 2, 3, 4, 5] | -| 6 | [1, 2, 3, NULL] | [1, 2, 3, NULL] | -| 7 | [1, 2, 3, NULL, NULL] | [1, 2, 3, NULL] | -+------+-----------------------------+---------------------------+ - -mysql> select k1, k2, array_distinct(k2) from array_test01; -+------+------------------------------------------+---------------------------+ -| k1 | k2 | array_distinct(`k2`) | -+------+------------------------------------------+---------------------------+ -| 1 | ['a', 'b', 'c', 'd', 'e'] | ['a', 'b', 'c', 'd', 'e'] | -| 2 | ['f', 'g', 'h'] | ['f', 'g', 'h'] | -| 3 | [''] | [''] | -| 3 | [NULL] | [NULL] | -| 5 | ['a', 'b', 'c', 'd', 'e', 'a', 'b', 'c'] | ['a', 'b', 'c', 'd', 'e'] | -| 6 | NULL | NULL | -| 7 | ['a', 'b', NULL] | ['a', 'b', NULL] | -| 8 | ['a', 'b', NULL, NULL] | ['a', 'b', NULL] | -+------+------------------------------------------+---------------------------+ -``` - -### keywords - -ARRAY, DISTINCT, ARRAY_DISTINCT - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-enumerate-uniq.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-enumerate-uniq.md deleted file mode 100644 index ed3b4feed12b24..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-enumerate-uniq.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -{ - "title": "ARRAY_ENUMERATE_UNIQ", - "language": "en" -} ---- - - - -## array_enumerate_uniq - - - -array_enumerate_uniq - - - -### description -#### Syntax - -`ARRAY array_enumerate_uniq(ARRAY arr)` - -Returns an 
array the same size as the source array, indicating for each element what its position is among elements with the same value. For example, array_enumerate_uniq([1, 2, 1, 4]) = [1, 1, 2, 1]. -The array_enumerate_uniq function can take multiple arrays of the same size as arguments. In this case, uniqueness is considered for tuples of elements in the same positions in all the arrays. For example, array_enumerate_uniq([1, 2, 1, 1, 2], [2, 1, 2, 2, 1]) = [1, 1, 2, 3, 2]. - -### example - -```shell -mysql> select k2, array_enumerate_uniq([1, 2, 3, 1, 2, 3]); -+-----------------------------------------------------+ -| array_enumerate_uniq(ARRAY(1, 2, 3, 1, 2, 3)) | -+-----------------------------------------------------+ -| [1, 1, 1, 2, 2, 2] | -+-----------------------------------------------------+ -mysql> select array_enumerate_uniq([1, 1, 1, 1, 1], [2, 1, 2, 1, 2], [3, 1, 3, 1, 3]); -+----------------------------------------------------------------------------------------+ -| array_enumerate_uniq(ARRAY(1, 1, 1, 1, 1), ARRAY(2, 1, 2, 1, 2), ARRAY(3, 1, 3, 1, 3)) | -+----------------------------------------------------------------------------------------+ -| [1, 1, 2, 1, 3] | -+----------------------------------------------------------------------------------------+ -``` - -### keywords - -ARRAY,ENUMERATE_UNIQ,ARRAY_ENUMERATE_UNIQ diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-enumerate.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-enumerate.md deleted file mode 100644 index 5499c1194ef98e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-enumerate.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -{ - "title": "ARRAY_ENUMERATE", - "language": "en" -} ---- - - - - -## array_enumerate - - - -array_enumerate - - - -### description -#### Syntax - -`ARRAY array_enumerate(ARRAY arr)` - -Returns array sub item indexes eg. 
[1, 2, 3, …, length (arr) ] - -### example - -```shell -mysql> create table array_type_table(k1 INT, k2 Array) duplicate key (k1) - -> distributed by hash(k1) buckets 1 properties('replication_num' = '1'); -mysql> insert into array_type_table values (0, []), ("1", [NULL]), ("2", ["1", "2", "3"]), ("3", ["1", NULL, "3"]), ("4", NULL); -mysql> set enable_vectorized_engine = true; # enable vectorized engine -mysql> select k2, array_enumerate(k2) from array_type_table; -+------------------+-----------------------+ -| k2 | array_enumerate(`k2`) | -+------------------+-----------------------+ -| [] | [] | -| [NULL] | [1] | -| ['1', '2', '3'] | [1, 2, 3] | -| ['1', NULL, '3'] | [1, 2, 3] | -| NULL | NULL | -+------------------+-----------------------+ -5 rows in set (0.01 sec) -``` - -### keywords - -ARRAY,ENUMERATE,ARRAY_ENUMERATE diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-except.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-except.md deleted file mode 100644 index b3b43f079fe42e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-except.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -{ - "title": "ARRAY_EXCEPT", - "language": "en" -} ---- - - - -## array_except - - - -array_except - - - -### description - -#### Syntax - -`ARRAY array_except(ARRAY array1, ARRAY array2)` - -Returns an array of the elements in array1 but not in array2, without duplicates. If the input parameter is null, null is returned. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select k1,k2,k3,array_except(k2,k3) from array_type_table; -+------+-----------------+--------------+--------------------------+ -| k1 | k2 | k3 | array_except(`k2`, `k3`) | -+------+-----------------+--------------+--------------------------+ -| 1 | [1, 2, 3] | [2, 4, 5] | [1, 3] | -| 2 | [2, 3] | [1, 5] | [2, 3] | -| 3 | [1, 1, 1] | [2, 2, 2] | [1] | -+------+-----------------+--------------+--------------------------+ - -mysql> select k1,k2,k3,array_except(k2,k3) from array_type_table_nullable; -+------+-----------------+--------------+--------------------------+ -| k1 | k2 | k3 | array_except(`k2`, `k3`) | -+------+-----------------+--------------+--------------------------+ -| 1 | [1, NULL, 3] | [1, 3, 5] | [NULL] | -| 2 | [NULL, NULL, 2] | [2, NULL, 4] | [] | -| 3 | NULL | [1, 2, 3] | NULL | -+------+-----------------+--------------+--------------------------+ - -mysql> select k1,k2,k3,array_except(k2,k3) from array_type_table_varchar; -+------+----------------------------+----------------------------------+--------------------------+ -| k1 | k2 | k3 | array_except(`k2`, `k3`) | -+------+----------------------------+----------------------------------+--------------------------+ -| 1 | ['hello', 'world', 'c++'] | ['I', 'am', 'c++'] | ['hello', 'world'] | -| 2 | ['a1', 'equals', 'b1'] | ['a2', 'equals', 'b2'] | ['a1', 'b1'] | -| 3 | ['hasnull', NULL, 'value'] | ['nohasnull', 'nonull', 'value'] | ['hasnull', NULL] | -| 3 | ['hasnull', NULL, 'value'] | ['hasnull', NULL, 'value'] | [] | -+------+----------------------------+----------------------------------+--------------------------+ - -mysql> select k1,k2,k3,array_except(k2,k3) from array_type_table_decimal; -+------+------------------+-------------------+--------------------------+ -| k1 | k2 | k3 | array_except(`k2`, `k3`) | 
-+------+------------------+-------------------+--------------------------+ -| 1 | [1.1, 2.1, 3.44] | [2.1, 3.4, 5.4] | [1.1, 3.44] | -| 2 | [NULL, 2, 5] | [NULL, NULL, 5.4] | [2, 5] | -| 1 | [1, NULL, 2, 5] | [1, 3.1, 5.4] | [NULL, 2, 5] | -+------+------------------+-------------------+--------------------------+ - -``` - -### keywords - -ARRAY,EXCEPT,ARRAY_EXCEPT diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-exists.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-exists.md deleted file mode 100644 index 47d5fab9df4dbb..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-exists.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -{ - "title": "ARRAY_EXISTS", - "language": "en" -} ---- - - - -## array_exists - - - -array_exists(lambda,array1,array2....) -array_exists(array1) - - - -### description - -#### Syntax -```sql -BOOLEAN array_exists(lambda, ARRAY arr1, ARRAY arr2, ... ) -BOOLEAN array_exists(ARRAY arr) -``` - -Use an optional lambda expression as an input parameter to perform corresponding expression calculations on the internal data of other input ARRAY parameters. Returns 1 when the calculation returns something other than 0; otherwise returns 0. -There are one or more parameters input in the lambda expression, which must be consistent with the number of input array columns later. Legal scalar functions can be executed in lambda, aggregate functions, etc. are not supported. -When lambda expression is not used as a parameter, array1 is used as the calculation result. 
- -``` -array_exists(x->x, array1); -array_exists(x->(x%2 = 0), array1); -array_exists(x->(abs(x)-1), array1); -array_exists((x,y)->(x = y), array1, array2); -array_exists(array1); -``` - -### example - -```sql - -mysql [test]>select *, array_exists(x->x>1,[1,2,3]) from array_test2 order by id; -+------+-----------------+-------------------------+-----------------------------------------------+ -| id | c_array1 | c_array2 | array_exists([x] -> x(0) > 1, ARRAY(1, 2, 3)) | -+------+-----------------+-------------------------+-----------------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [0, 1, 1] | -| 2 | [6, 7, 8] | [10, 12, 13] | [0, 1, 1] | -| 3 | [1] | [-100] | [0, 1, 1] | -| 4 | NULL | NULL | [0, 1, 1] | -+------+-----------------+-------------------------+-----------------------------------------------+ -4 rows in set (0.02 sec) - -mysql [test]>select c_array1, c_array2, array_exists(x->x%2=0,[1,2,3]) from array_test2 order by id; -+-----------------+-------------------------+---------------------------------------------------+ -| c_array1 | c_array2 | array_exists([x] -> x(0) % 2 = 0, ARRAY(1, 2, 3)) | -+-----------------+-------------------------+---------------------------------------------------+ -| [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [0, 1, 0] | -| [6, 7, 8] | [10, 12, 13] | [0, 1, 0] | -| [1] | [-100] | [0, 1, 0] | -| NULL | NULL | [0, 1, 0] | -+-----------------+-------------------------+---------------------------------------------------+ -4 rows in set (0.02 sec) - -mysql [test]>select c_array1, c_array2, array_exists(x->abs(x)-1,[1,2,3]) from array_test2 order by id; -+-----------------+-------------------------+----------------------------------------------------+ -| c_array1 | c_array2 | array_exists([x] -> abs(x(0)) - 1, ARRAY(1, 2, 3)) | -+-----------------+-------------------------+----------------------------------------------------+ -| [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [0, 1, 1, 1, 1] | -| 
[6, 7, 8] | [10, 12, 13] | [1, 1, 1] | -| [1, NULL] | [-100] | [0, NULL] | -| NULL | NULL | NULL | -+-----------------+-------------------------+----------------------------------------------------+ -4 rows in set (0.02 sec) - -mysql [test]>select c_array1, c_array2, array_exists((x,y)->x>y,c_array1,c_array2) from array_test2 order by id; -+-----------------+-------------------------+-------------------------------------------------------------+ -| c_array1 | c_array2 | array_exists([x, y] -> x(0) > y(1), `c_array1`, `c_array2`) | -+-----------------+-------------------------+-------------------------------------------------------------+ -| [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [0, 0, 1, 0, 1] | -| [6, 7, 8] | [10, 12, 13] | [0, 0, 0] | -| [1] | [-100] | [1] | -| NULL | NULL | NULL | -+-----------------+-------------------------+-------------------------------------------------------------+ -4 rows in set (0.02 sec) - -mysql [test]>select *, array_exists(c_array1) from array_test2 order by id; -+------+-----------------+-------------------------+--------------------------+ -| id | c_array1 | c_array2 | array_exists(`c_array1`) | -+------+-----------------+-------------------------+--------------------------+ -| 1 | [1, 2, 3, 0, 5] | [10, 20, -40, 80, -100] | [1, 1, 1, 0, 1] | -| 2 | [6, 7, 8] | [10, 12, 13] | [1, 1, 1] | -| 3 | [0, NULL] | [-100] | [0, NULL] | -| 4 | NULL | NULL | NULL | -+------+-----------------+-------------------------+--------------------------+ -4 rows in set (0.02 sec) - -``` - -### keywords - -ARRAY,ARRAY_EXISTS - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-filter.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-filter.md deleted file mode 100644 index 6bedc3ef6ccb61..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-filter.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -{ - "title": "ARRAY_FILTER", - "language": "en" -} ---- - - - -## array_filter - - - 
-array_filter(lambda,array) - - - - - -array array_filter(array arr, array_bool filter_column) - - - -### description - -#### Syntax -```sql -ARRAY array_filter(lambda, ARRAY arr) -ARRAY array_filter(ARRAY arr, ARRAY filter_column) -``` - -Use the lambda expression as the input parameter to calculate and filter the data of the ARRAY column of the other input parameter. -And filter out the values of 0 and NULL in the result. - -``` -array_filter(x->x>0, array1); -array_filter(x->(x+2)=10, array1); -array_filter(x->(abs(x)-2)>0, array1); -array_filter(c_array,[0,1,0]); -``` - -### example - -```shell -mysql [test]>select c_array,array_filter(c_array,[0,1,0]) from array_test; -+-----------------+----------------------------------------------------+ -| c_array | array_filter(`c_array`, ARRAY(FALSE, TRUE, FALSE)) | -+-----------------+----------------------------------------------------+ -| [1, 2, 3, 4, 5] | [2] | -| [6, 7, 8] | [7] | -| [] | [] | -| NULL | NULL | -+-----------------+----------------------------------------------------+ - -mysql [test]>select array_filter(x->(x > 1),[1,2,3,0,null]); -+----------------------------------------------------------------------------------------------+ -| array_filter(ARRAY(1, 2, 3, 0, NULL), array_map([x] -> (x(0) > 1), ARRAY(1, 2, 3, 0, NULL))) | -+----------------------------------------------------------------------------------------------+ -| [2, 3] | -+----------------------------------------------------------------------------------------------+ - -mysql [test]>select *, array_filter(x->x>0,c_array2) from array_test2; -+------+-----------------+-------------------------+------------------------------------------------------------------+ -| id | c_array1 | c_array2 | array_filter(`c_array2`, array_map([x] -> x(0) > 0, `c_array2`)) | -+------+-----------------+-------------------------+------------------------------------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [10, 20, 80] | -| 2 
| [6, 7, 8] | [10, 12, 13] | [10, 12, 13] | -| 3 | [1] | [-100] | [] | -| 4 | NULL | NULL | NULL | -+------+-----------------+-------------------------+------------------------------------------------------------------+ -4 rows in set (0.01 sec) - -mysql [test]>select *, array_filter(x->x%2=0,c_array2) from array_test2; -+------+-----------------+-------------------------+----------------------------------------------------------------------+ -| id | c_array1 | c_array2 | array_filter(`c_array2`, array_map([x] -> x(0) % 2 = 0, `c_array2`)) | -+------+-----------------+-------------------------+----------------------------------------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [10, 20, -40, 80, -100] | -| 2 | [6, 7, 8] | [10, 12, 13] | [10, 12] | -| 3 | [1] | [-100] | [-100] | -| 4 | NULL | NULL | NULL | -+------+-----------------+-------------------------+----------------------------------------------------------------------+ - -mysql [test]>select *, array_filter(x->(x*(-10)>0),c_array2) from array_test2; -+------+-----------------+-------------------------+----------------------------------------------------------------------------+ -| id | c_array1 | c_array2 | array_filter(`c_array2`, array_map([x] -> (x(0) * (-10) > 0), `c_array2`)) | -+------+-----------------+-------------------------+----------------------------------------------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [-40, -100] | -| 2 | [6, 7, 8] | [10, 12, 13] | [] | -| 3 | [1] | [-100] | [-100] | -| 4 | NULL | NULL | NULL | -+------+-----------------+-------------------------+----------------------------------------------------------------------------+ - -mysql [test]>select *, array_filter(x->x>0, array_map((x,y)->(x>y), c_array1,c_array2)) as res from array_test2; -+------+-----------------+-------------------------+--------+ -| id | c_array1 | c_array2 | res | 
-+------+-----------------+-------------------------+--------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [1, 1] | -| 2 | [6, 7, 8] | [10, 12, 13] | [] | -| 3 | [1] | [-100] | [1] | -| 4 | NULL | NULL | NULL | -+------+-----------------+-------------------------+--------+ -``` - -### keywords - -ARRAY,FILTER,ARRAY_FILTER - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-first-index.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-first-index.md deleted file mode 100644 index c2528d9fc3376c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-first-index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -{ - "title": "ARRAY_FIRST_INDEX", - "language": "en" -} ---- - - - -## array_first_index - - - -array_first_index - - - -### description - -#### Syntax - -`ARRAY array_first_index(lambda, ARRAY array1, ...)` - -Use an lambda expression as an input parameter to perform corresponding expression calculations on the internal data of other input ARRAY parameters. Returns the first index such that the return value of `lambda(array1[i], ...)` is not 0. Return 0 if such index is not found. - -There are one or more parameters input in the lambda expression, and the number of elements of all input arrays must be the same. Legal scalar functions can be executed in lambda, aggregate functions, etc. are not supported. 
- -``` -array_first_index(x->x>1, array1); -array_first_index(x->(x%2 = 0), array1); -array_first_index(x->(abs(x)-1), array1); -array_first_index((x,y)->(x = y), array1, array2); -``` - -### example - -``` -mysql> select array_first_index(x->x+1>3, [2, 3, 4]); -+-------------------------------------------------------------------+ -| array_first_index(array_map([x] -> x(0) + 1 > 3, ARRAY(2, 3, 4))) | -+-------------------------------------------------------------------+ -| 2 | -+-------------------------------------------------------------------+ - -mysql> select array_first_index(x -> x is null, [null, 1, 2]); -+----------------------------------------------------------------------+ -| array_first_index(array_map([x] -> x(0) IS NULL, ARRAY(NULL, 1, 2))) | -+----------------------------------------------------------------------+ -| 1 | -+----------------------------------------------------------------------+ - -mysql> select array_first_index(x->power(x,2)>10, [1, 2, 3, 4]); -+---------------------------------------------------------------------------------+ -| array_first_index(array_map([x] -> power(x(0), 2.0) > 10.0, ARRAY(1, 2, 3, 4))) | -+---------------------------------------------------------------------------------+ -| 4 | -+---------------------------------------------------------------------------------+ - -mysql> select col2, col3, array_first_index((x,y)->x>y, col2, col3) from array_test; -+--------------+--------------+---------------------------------------------------------------------+ -| col2 | col3 | array_first_index(array_map([x, y] -> x(0) > y(1), `col2`, `col3`)) | -+--------------+--------------+---------------------------------------------------------------------+ -| [1, 2, 3] | [3, 4, 5] | 0 | -| [1, NULL, 2] | [NULL, 3, 1] | 3 | -| [1, 2, 3] | [9, 8, 7] | 0 | -| NULL | NULL | 0 | -+--------------+--------------+---------------------------------------------------------------------+ -``` - -### keywords - 
-ARRAY,FIRST_INDEX,ARRAY_FIRST_INDEX \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-first.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-first.md deleted file mode 100644 index c0045433547bd4..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-first.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -{ - "title": "ARRAY_FIRST", - "language": "en" -} ---- - - - -## array_first - - - -array_first - - - -### description -Returns the first element in the array for which func(arr1[i]) returns something other than 0. - -#### Syntax - -``` -T array_first(lambda, ARRAY) -``` - -Use a lambda bool expression and an array as the input parameters, the lambda expression is used to evaluate the internal data of other input ARRAY parameters. - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select array_first(x->x>2, [1,2,3,0]) ; -+------------------------------------------------------------------------------------------------+ -| array_first(array_filter(ARRAY(1, 2, 3, 0), array_map([x] -> x(0) > 2, ARRAY(1, 2, 3, 0))), -1) | -+------------------------------------------------------------------------------------------------+ -| 3 | -+------------------------------------------------------------------------------------------------+ - - -mysql> select array_first(x->x>4, [1,2,3,0]) ; -+------------------------------------------------------------------------------------------------+ -| array_first(array_filter(ARRAY(1, 2, 3, 0), array_map([x] -> x(0) > 4, ARRAY(1, 2, 3, 0))), -1) | -+------------------------------------------------------------------------------------------------+ -| NULL | -+------------------------------------------------------------------------------------------------+ - - -mysql> select array_first(x->x>1, [1,2,3,0]) ; -+---------------------------------------------------------------------------------------------+ -| 
array_first(array_filter(ARRAY(1, 2, 3, 0), array_map([x] -> x > 1, ARRAY(1, 2, 3, 0))), 1) | -+---------------------------------------------------------------------------------------------+ -| 2 | -+---------------------------------------------------------------------------------------------+ -``` - - -### keywords - -ARRAY, LAST, array_first diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-intersect.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-intersect.md deleted file mode 100644 index 5f4bcb91c4414f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-intersect.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -{ - "title": "ARRAY_INTERSECT", - "language": "en" -} ---- - - - -## array_intersect - - - -array_intersect - - - - -### description - -#### Syntax - -`ARRAY array_intersect(ARRAY array1, ARRAY array2)` - -Returns an array of the elements in the intersection of array1 and array2, without duplicates. If the input parameter is null, null is returned. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select k1,k2,k3,array_intersect(k2,k3) from array_type_table; -+------+-----------------+--------------+-----------------------------+ -| k1 | k2 | k3 | array_intersect(`k2`, `k3`) | -+------+-----------------+--------------+-----------------------------+ -| 1 | [1, 2, 3] | [2, 4, 5] | [2] | -| 2 | [2, 3] | [1, 5] | [] | -| 3 | [1, 1, 1] | [2, 2, 2] | [] | -+------+-----------------+--------------+-----------------------------+ - -mysql> select k1,k2,k3,array_intersect(k2,k3) from array_type_table_nullable; -+------+-----------------+--------------+-----------------------------+ -| k1 | k2 | k3 | array_intersect(`k2`, `k3`) | -+------+-----------------+--------------+-----------------------------+ -| 1 | [1, NULL, 3] | [1, 3, 5] | [1, 3] | -| 2 | [NULL, NULL, 2] | [2, NULL, 4] | [NULL, 2] | -| 3 | NULL | [1, 2, 3] | NULL | -+------+-----------------+--------------+-----------------------------+ - -mysql> select k1,k2,k3,array_intersect(k2,k3) from array_type_table_varchar; -+------+----------------------------+----------------------------------+-----------------------------+ -| k1 | k2 | k3 | array_intersect(`k2`, `k3`) | -+------+----------------------------+----------------------------------+-----------------------------+ -| 1 | ['hello', 'world', 'c++'] | ['I', 'am', 'c++'] | ['c++'] | -| 2 | ['a1', 'equals', 'b1'] | ['a2', 'equals', 'b2'] | ['equals'] | -| 3 | ['hasnull', NULL, 'value'] | ['nohasnull', 'nonull', 'value'] | [NULL, 'value'] | -| 3 | ['hasnull', NULL, 'value'] | ['hasnull', NULL, 'value'] | ['hasnull', 'value'] | -+------+----------------------------+----------------------------------+-----------------------------+ - -mysql> select k1,k2,k3,array_intersect(k2,k3) from array_type_table_decimal; -+------+------------------+-------------------+-----------------------------+ -| k1 | k2 | k3 | array_intersect(`k2`, `k3`) | 
-+------+------------------+-------------------+-----------------------------+ -| 1 | [1.1, 2.1, 3.44] | [2.1, 3.4, 5.4] | [2.1] | -| 2 | [NULL, 2, 5] | [NULL, NULL, 5.4] | [NULL] | -| 3 | [1, NULL, 2, 5] | [1, 3.1, 5.4] | [1] | -+------+------------------+-------------------+-----------------------------+ - -``` - -### keywords - -ARRAY,INTERSECT,ARRAY_INTERSECT - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-join.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-join.md deleted file mode 100644 index 74b3998dd048e5..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-join.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -{ - "title": "ARRAY_JOIN", - "language": "en" -} ---- - - - -## array_join - - - -array_join - - - - -### description - -#### Syntax - -`VARCHAR array_join(ARRAY arr, VARCHAR sep[, VARCHAR null_replace])` - -Combines all elements in the array to generate a new string according to the separator (sep) -and the string to replace NULL (null_replace). -If sep is NULL, return NULL. -If null_replace is NULL, return NULL. -If sep is an empty string, no delimiter is applied. -If null_replace is an empty string or not specified, the NULL elements in the array are discarded directly. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select k1, k2, array_join(k2, '_', 'null') from array_test order by k1; -+------+-----------------------------+------------------------------------+ -| k1 | k2 | array_join(`k2`, '_', 'null') | -+------+-----------------------------+------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | 1_2_3_4_5 | -| 2 | [6, 7, 8] | 6_7_8 | -| 3 | [] | | -| 4 | NULL | NULL | -| 5 | [1, 2, 3, 4, 5, 4, 3, 2, 1] | 1_2_3_4_5_4_3_2_1 | -| 6 | [1, 2, 3, NULL] | 1_2_3_null | -| 7 | [4, 5, 6, NULL, NULL] | 4_5_6_null_null | -+------+-----------------------------+------------------------------------+ - -mysql> select k1, k2, array_join(k2, '_', 'null') from array_test01 order by k1; -+------+-----------------------------------+------------------------------------+ -| k1 | k2 | array_join(`k2`, '_', 'null') | -+------+-----------------------------------+------------------------------------+ -| 1 | ['a', 'b', 'c', 'd'] | a_b_c_d | -| 2 | ['e', 'f', 'g', 'h'] | e_f_g_h | -| 3 | [NULL, 'a', NULL, 'b', NULL, 'c'] | null_a_null_b_null_c | -| 4 | ['d', 'e', NULL, ' '] | d_e_null_ | -| 5 | [' ', NULL, 'f', 'g'] | _null_f_g | -+------+-----------------------------------+------------------------------------+ - -mysql> select k1, k2, array_join(k2, '_') from array_test order by k1; -+------+-----------------------------+----------------------------+ -| k1 | k2 | array_join(`k2`, '_') | -+------+-----------------------------+----------------------------+ -| 1 | [1, 2, 3, 4, 5] | 1_2_3_4_5 | -| 2 | [6, 7, 8] | 6_7_8 | -| 3 | [] | | -| 4 | NULL | NULL | -| 5 | [1, 2, 3, 4, 5, 4, 3, 2, 1] | 1_2_3_4_5_4_3_2_1 | -| 6 | [1, 2, 3, NULL] | 1_2_3 | -| 7 | [4, 5, 6, NULL, NULL] | 4_5_6 | -+------+-----------------------------+----------------------------+ - -mysql> select k1, k2, array_join(k2, '_') from array_test01 order by k1; 
-+------+-----------------------------------+----------------------------+ -| k1 | k2 | array_join(`k2`, '_') | -+------+-----------------------------------+----------------------------+ -| 1 | ['a', 'b', 'c', 'd'] | a_b_c_d | -| 2 | ['e', 'f', 'g', 'h'] | e_f_g_h | -| 3 | [NULL, 'a', NULL, 'b', NULL, 'c'] | a_b_c | -| 4 | ['d', 'e', NULL, ' '] | d_e_ | -| 5 | [' ', NULL, 'f', 'g'] | _f_g | -+------+-----------------------------------+----------------------------+ -``` - -### keywords - -ARRAY, JOIN, ARRAY_JOIN diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-last-index.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-last-index.md deleted file mode 100644 index 37f2a4669048d1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-last-index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -{ - "title": "ARRAY_LAST_INDEX", - "language": "en" -} ---- - - - -## array_last_index - - - -array_last_index - - - -### description - -#### Syntax - -`ARRAY array_last_index(lambda, ARRAY array1, ...)` - -Use an lambda expression as an input parameter to perform corresponding expression calculations on the internal data of other input ARRAY parameters. Returns the last index such that the return value of `lambda(array1[i], ...)` is not 0. Return 0 if such index is not found. - -There are one or more parameters input in the lambda expression, and the number of elements of all input arrays must be the same. Legal scalar functions can be executed in lambda, aggregate functions, etc. are not supported. 
- -``` -array_last_index(x->x>1, array1); -array_last_index(x->(x%2 = 0), array1); -array_last_index(x->(abs(x)-1), array1); -array_last_index((x,y)->(x = y), array1, array2); -``` - -### example - -``` -mysql> select array_last_index(x -> x is null, [null, null, 1, 2]); -+------------------------------------------------------------------------+ -| array_last_index(array_map([x] -> x IS NULL, ARRAY(NULL, NULL, 1, 2))) | -+------------------------------------------------------------------------+ -| 2 | -+------------------------------------------------------------------------+ - - -mysql> select array_last_index(x->x='s', ['a', 's', 's', 's', 'b']); -+-----------------------------------------------------------------------------+ -| array_last_index(array_map([x] -> x = 's', ARRAY('a', 's', 's', 's', 'b'))) | -+-----------------------------------------------------------------------------+ -| 4 | -+-----------------------------------------------------------------------------+ - -mysql> select array_last_index(x->power(x,2)>10, [1, 4, 3, 4]); -+-----------------------------------------------------------------------------+ -| array_last_index(array_map([x] -> power(x, 2.0) > 10.0, ARRAY(1, 4, 3, 4))) | -+-----------------------------------------------------------------------------+ -| 4 | -+-----------------------------------------------------------------------------+ - -mysql> select col2, col3, array_last_index((x,y)->x>y, col2, col3) from array_test; -+--------------+--------------+---------------------------------------------------------------------+ -| col2 | col3 | array_last_index(array_map([x, y] -> x(0) > y(1), `col2`, `col3`)) | -+--------------+--------------+---------------------------------------------------------------------+ -| [1, 2, 3] | [3, 4, 5] | 0 | -| [1, NULL, 2] | [NULL, 3, 1] | 3 | -| [1, 2, 3] | [9, 8, 7] | 0 | -| NULL | NULL | 0 | -+--------------+--------------+---------------------------------------------------------------------+ -``` - -### 
keywords - -ARRAY,FIRST_INDEX,array_last_index \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-last.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-last.md deleted file mode 100644 index 1df05963f59201..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-last.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -{ - "title": "ARRAY_LAST", - "language": "en" -} ---- - - - -## array_last - - - -array_last - - - -### description -Returns the last element in the array for which func(arr1[i]) returns something other than 0. - -#### Syntax - -``` -T array_last(lambda, ARRAY) -``` - -Use a lambda bool expression and an array as the input parameters, the lambda expression is used to evaluate the internal data of other input ARRAY parameters. - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select array_last(x->x>2, [1,2,3,0]) ; -+------------------------------------------------------------------------------------------------+ -| array_last(array_filter(ARRAY(1, 2, 3, 0), array_map([x] -> x(0) > 2, ARRAY(1, 2, 3, 0))), -1) | -+------------------------------------------------------------------------------------------------+ -| 3 | -+------------------------------------------------------------------------------------------------+ - - -mysql> select array_last(x->x>4, [1,2,3,0]) ; -+------------------------------------------------------------------------------------------------+ -| array_last(array_filter(ARRAY(1, 2, 3, 0), array_map([x] -> x(0) > 4, ARRAY(1, 2, 3, 0))), -1) | -+------------------------------------------------------------------------------------------------+ -| NULL | -+------------------------------------------------------------------------------------------------+ - - -### keywords - -ARRAY, LAST, ARRAY_LAST diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-map.md 
b/docs/en/docs/sql-manual/sql-functions/array-functions/array-map.md deleted file mode 100644 index 1ed86730f8674b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-map.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -{ - "title": "ARRAY_MAP", - "language": "en" -} ---- - - - -## array_map - - - -array_map(lambda,array,....) - - - -### description - -#### Syntax -`ARRAY array_map(lambda, ARRAY array1, ARRAY array2)` - -Use a lambda expression as the input parameter to calculate the corresponding expression for the internal data of other input ARRAY parameters. -The number of parameters entered in the lambda expression is 1 or more, which must be consistent with the number of input array columns. -The scalar functions can be executed in lambda, and aggregate functions are not supported. - -``` -array_map(x->x, array1); -array_map(x->(x+2), array1); -array_map(x->(abs(x)-2), array1); - -array_map((x,y)->(x = y), array1, array2); -array_map((x,y)->(power(x,2)+y), array1, array2); -array_map((x,y,z)->(abs(x)+y*z), array1, array2, array3); -``` - -### example - -```shell - -mysql [test]>select *, array_map(x->x,[1,2,3]) from array_test2 order by id; -+------+-----------------+-------------------------+----------------------------------------+ -| id | c_array1 | c_array2 | array_map([x] -> x(0), ARRAY(1, 2, 3)) | -+------+-----------------+-------------------------+----------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [1, 2, 3] | -| 2 | [6, 7, 8] | [10, 12, 13] | [1, 2, 3] | -| 3 | [1] | [-100] | [1, 2, 3] | -| 4 | NULL | NULL | [1, 2, 3] | -+------+-----------------+-------------------------+----------------------------------------+ -4 rows in set (0.02 sec) - -mysql [test]>select *, array_map(x->x+2,[1,2,3]) from array_test2 order by id; -+------+-----------------+-------------------------+--------------------------------------------+ -| id | c_array1 | c_array2 | array_map([x] -> x(0) + 2, ARRAY(1, 2, 3)) 
| -+------+-----------------+-------------------------+--------------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [3, 4, 5] | -| 2 | [6, 7, 8] | [10, 12, 13] | [3, 4, 5] | -| 3 | [1] | [-100] | [3, 4, 5] | -| 4 | NULL | NULL | [3, 4, 5] | -+------+-----------------+-------------------------+--------------------------------------------+ -4 rows in set (0.02 sec) - -mysql [test]>select c_array1, c_array2, array_map(x->x,[1,2,3]) from array_test2 order by id; -+-----------------+-------------------------+----------------------------------------+ -| c_array1 | c_array2 | array_map([x] -> x(0), ARRAY(1, 2, 3)) | -+-----------------+-------------------------+----------------------------------------+ -| [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [1, 2, 3] | -| [6, 7, 8] | [10, 12, 13] | [1, 2, 3] | -| [1] | [-100] | [1, 2, 3] | -| NULL | NULL | [1, 2, 3] | -+-----------------+-------------------------+----------------------------------------+ -4 rows in set (0.01 sec) - -mysql [test]>select c_array1, c_array2, array_map(x->power(x,2),[1,2,3]) from array_test2 order by id; -+-----------------+-------------------------+----------------------------------------------------+ -| c_array1 | c_array2 | array_map([x] -> power(x(0), 2.0), ARRAY(1, 2, 3)) | -+-----------------+-------------------------+----------------------------------------------------+ -| [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [1, 4, 9] | -| [6, 7, 8] | [10, 12, 13] | [1, 4, 9] | -| [1] | [-100] | [1, 4, 9] | -| NULL | NULL | [1, 4, 9] | -+-----------------+-------------------------+----------------------------------------------------+ - -mysql [test]>select c_array1, c_array2, array_map((x,y)->x+y,c_array1,c_array2) from array_test2 order by id; -+-----------------+-------------------------+----------------------------------------------------------+ -| c_array1 | c_array2 | array_map([x, y] -> x(0) + y(1), `c_array1`, `c_array2`) | 
-+-----------------+-------------------------+----------------------------------------------------------+ -| [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [11, 22, -37, 84, -95] | -| [6, 7, 8] | [10, 12, 13] | [16, 19, 21] | -| [1] | [-100] | [-99] | -| NULL | NULL | NULL | -+-----------------+-------------------------+----------------------------------------------------------+ -4 rows in set (0.02 sec) - -mysql [test]>select c_array1, c_array2, array_map((x,y)->power(x,2)+y,c_array1, c_array2) from array_test2 order by id; -+-----------------+-------------------------+----------------------------------------------------------------------+ -| c_array1 | c_array2 | array_map([x, y] -> power(x(0), 2.0) + y(1), `c_array1`, `c_array2`) | -+-----------------+-------------------------+----------------------------------------------------------------------+ -| [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [11, 24, -31, 96, -75] | -| [6, 7, 8] | [10, 12, 13] | [46, 61, 77] | -| [1] | [-100] | [-99] | -| NULL | NULL | NULL | -+-----------------+-------------------------+----------------------------------------------------------------------+ -4 rows in set (0.03 sec) - -mysql [test]>select *,array_map(x->x=3,c_array1) from array_test2 order by id; -+------+-----------------+-------------------------+----------------------------------------+ -| id | c_array1 | c_array2 | array_map([x] -> x(0) = 3, `c_array1`) | -+------+-----------------+-------------------------+----------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [0, 0, 1, 0, 0] | -| 2 | [6, 7, 8] | [10, 12, 13] | [0, 0, 0] | -| 3 | [1] | [-100] | [0] | -| 4 | NULL | NULL | NULL | -+------+-----------------+-------------------------+----------------------------------------+ -4 rows in set (0.02 sec) - -mysql [test]>select *,array_map(x->x>3,c_array1) from array_test2 order by id; -+------+-----------------+-------------------------+----------------------------------------+ -| id | c_array1 
| c_array2 | array_map([x] -> x(0) > 3, `c_array1`) | -+------+-----------------+-------------------------+----------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [0, 0, 0, 1, 1] | -| 2 | [6, 7, 8] | [10, 12, 13] | [1, 1, 1] | -| 3 | [1] | [-100] | [0] | -| 4 | NULL | NULL | NULL | -+------+-----------------+-------------------------+----------------------------------------+ -4 rows in set (0.02 sec) - -mysql [test]>select *,array_map((x,y)->x>y,c_array1,c_array2) from array_test2 order by id; -+------+-----------------+-------------------------+----------------------------------------------------------+ -| id | c_array1 | c_array2 | array_map([x, y] -> x(0) > y(1), `c_array1`, `c_array2`) | -+------+-----------------+-------------------------+----------------------------------------------------------+ -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [0, 0, 1, 0, 1] | -| 2 | [6, 7, 8] | [10, 12, 13] | [0, 0, 0] | -| 3 | [1] | [-100] | [1] | -| 4 | NULL | NULL | NULL | -+------+-----------------+-------------------------+----------------------------------------------------------+ -4 rows in set (0.02 sec) - -mysql [test]>select array_map(x->cast(x as string), c_array1) from test_array_map_function; -+-----------------+-------------------------------------------------------+ -| c_array1 | array_map([x] -> CAST(x(0) AS CHARACTER), `c_array1`) | -+-----------------+-------------------------------------------------------+ -| [1, 2, 3, 4, 5] | ['1', '2', '3', '4', '5'] | -| [6, 7, 8] | ['6', '7', '8'] | -| [] | [] | -| NULL | NULL | -+-----------------+-------------------------------------------------------+ -4 rows in set (0.01 sec) -``` - -### keywords - -ARRAY,MAP,ARRAY_MAP - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-max.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-max.md deleted file mode 100644 index 1c094afe620558..00000000000000 --- 
a/docs/en/docs/sql-manual/sql-functions/array-functions/array-max.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -{ - "title": "ARRAY_MAX", - "language": "en" -} ---- - - -## array_max - - - -array_max - - - -### description - -#### Syntax -`T array_max(ARRAY array1)` - -Get the maximum element in an array (`NULL` values are skipped). -When the array is empty or all elements in the array are `NULL` values, the function returns `NULL`. - -### example - -```shell -mysql> create table array_type_table(k1 INT, k2 Array) duplicate key (k1) - -> distributed by hash(k1) buckets 1 properties('replication_num' = '1'); -mysql> insert into array_type_table values (0, []), (1, [NULL]), (2, [1, 2, 3]), (3, [1, NULL, 3]); -mysql> set enable_vectorized_engine = true; # enable vectorized engine -mysql> select k2, array_max(k2) from array_type_table; -+--------------+-----------------+ -| k2 | array_max(`k2`) | -+--------------+-----------------+ -| [] | NULL | -| [NULL] | NULL | -| [1, 2, 3] | 3 | -| [1, NULL, 3] | 3 | -+--------------+-----------------+ -4 rows in set (0.02 sec) - -``` - -### keywords - -ARRAY,MAX,ARRAY_MAX - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-min.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-min.md deleted file mode 100644 index ff29cb382b7fb0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-min.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -{ - "title": "ARRAY_MIN", - "language": "en" -} ---- - - - -## array_min - - - -array_min - - - -### description - -#### Syntax -`T array_min(ARRAY array1)` - -Get the minimum element in an array (`NULL` values are skipped). -When the array is empty or all elements in the array are `NULL` values, the function returns `NULL`. 
- -### example - -```shell -mysql> create table array_type_table(k1 INT, k2 Array) duplicate key (k1) - -> distributed by hash(k1) buckets 1 properties('replication_num' = '1'); -mysql> insert into array_type_table values (0, []), (1, [NULL]), (2, [1, 2, 3]), (3, [1, NULL, 3]); -mysql> set enable_vectorized_engine = true; # enable vectorized engine -mysql> select k2, array_min(k2) from array_type_table; -+--------------+-----------------+ -| k2 | array_min(`k2`) | -+--------------+-----------------+ -| [] | NULL | -| [NULL] | NULL | -| [1, 2, 3] | 1 | -| [1, NULL, 3] | 1 | -+--------------+-----------------+ -4 rows in set (0.02 sec) - -``` - -### keywords - -ARRAY,MIN,ARRAY_MIN diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-popback.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-popback.md deleted file mode 100644 index 0e2dd1748eb674..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-popback.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -{ - "title": "ARRAY_POPBACK", - "language": "en" -} ---- - - - -## array_popback - - - -array_popback - - - -### description - -#### Syntax - -`ARRAY array_popback(ARRAY arr)` - -Remove the last element from array. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select array_popback(['test', NULL, 'value']); -+-----------------------------------------------------+ -| array_popback(ARRAY('test', NULL, 'value')) | -+-----------------------------------------------------+ -| [test, NULL] | -+-----------------------------------------------------+ - -``` - -### keywords - -ARRAY,POPBACK,ARRAY_POPBACK - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-popfront.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-popfront.md deleted file mode 100644 index 010d0f18a76f02..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-popfront.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -{ - "title": "ARRAY_POPFRONT", - "language": "en" -} ---- - - - -## array_popfront - - - -array_popfront - - - -### description - -#### Syntax - -`ARRAY array_popfront(ARRAY arr)` - -Remove the first element from array. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select array_popfront(['test', NULL, 'value']); -+-----------------------------------------------------+ -| array_popfront(ARRAY('test', NULL, 'value')) | -+-----------------------------------------------------+ -| [NULL, value] | -+-----------------------------------------------------+ - -``` - -### keywords - -ARRAY,POPFRONT,ARRAY_POPFRONT - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-position.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-position.md deleted file mode 100644 index 390f9ac431d7d3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-position.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -{ - "title": "ARRAY_POSITION", - "language": "en" -} ---- - - - -## array_position - - - -array_position - - - -### description - -#### Syntax - -`BIGINT array_position(ARRAY arr, T value)` - -Returns a position/index of first occurrence of the `value` in the given array. - -``` -position - value position in array (starts with 1); -0 - if value does not present in the array; -NULL - when array is NULL. 
-``` - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> SELECT id,c_array,array_position(c_array, 5) FROM `array_test`; -+------+-----------------+------------------------------+ -| id | c_array | array_position(`c_array`, 5) | -+------+-----------------+------------------------------+ -| 1 | [1, 2, 3, 4, 5] | 5 | -| 2 | [6, 7, 8] | 0 | -| 3 | [] | 0 | -| 4 | NULL | NULL | -+------+-----------------+------------------------------+ - -mysql> select array_position([1, null], null); -+--------------------------------------+ -| array_position(ARRAY(1, NULL), NULL) | -+--------------------------------------+ -| 2 | -+--------------------------------------+ -1 row in set (0.01 sec) -``` - -### keywords - -ARRAY,POSITION,ARRAY_POSITION - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-product.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-product.md deleted file mode 100644 index da46511044fa2a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-product.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "ARRAY_PRODUCT", - "language": "en" -} ---- - - -## array_product - - - -array_product - - - -### description - -#### Syntax - -`T array_product(ARRAY arr)` - -Get the product of all elements in an array (`NULL` values are skipped). -When the array is empty or all elements in the array are `NULL` values, the function returns `NULL`. 
- -### example - -```shell -mysql> create table array_type_table(k1 INT, k2 Array) duplicate key (k1) - -> distributed by hash(k1) buckets 1 properties('replication_num' = '1'); -mysql> insert into array_type_table values (0, []), (1, [NULL]), (2, [1, 2, 3]), (3, [1, NULL, 3]); -mysql> set enable_vectorized_engine = true; # enable vectorized engine -mysql> select k2, array_product(k2) from array_type_table; -+--------------+---------------------+ -| k2 | array_product(`k2`) | -+--------------+---------------------+ -| [] | NULL | -| [NULL] | NULL | -| [1, 2, 3] | 6 | -| [1, NULL, 3] | 3 | -+--------------+---------------------+ -4 rows in set (0.01 sec) - -``` - -### keywords - -ARRAY,PRODUCT,ARRAY_PRODUCT - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-pushback.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-pushback.md deleted file mode 100644 index 360cd18af221a1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-pushback.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -{ - "title": "ARRAY_PUSHBACK", - "language": "en" -} ---- - - - -## array_pushback - - - -array_pushback - - - -### description - -#### Syntax - -`Array array_pushback(Array arr, T value)` - -Add the value to the end of the array. - -#### Returned value - -The array after adding the value. - -Type: Array. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select array_pushback([1, 2], 3); -+---------------------------------+ -| array_pushback(ARRAY(1, 2), 3) | -+---------------------------------+ -| [1, 2, 3] | -+---------------------------------+ - -mysql> select col3, array_pushback(col3, 6) from array_test; -+-----------+----------------------------+ -| col3 | array_pushback(`col3`, 6) | -+-----------+----------------------------+ -| [3, 4, 5] | [3, 4, 5, 6] | -| [NULL] | [NULL, 6] | -| NULL | NULL | -| [] | [6] | -+-----------+----------------------------+ - -mysql> select col1, col3, array_pushback(col3, col1) from array_test; -+------+-----------+---------------------------------+ -| col1 | col3 | array_pushback(`col3`, `col1`) | -+------+-----------+---------------------------------+ -| 0 | [3, 4, 5] | [3, 4, 5, 0] | -| 1 | [NULL] | [NULL, 1] | -| 2 | NULL | NULL | -| 3 | [] | [3] | -+------+-----------+---------------------------------+ -``` - -### keywords - -ARRAY,PUSHBACK,ARRAY_PUSHBACK \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-pushfront.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-pushfront.md deleted file mode 100644 index 5d717846329782..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-pushfront.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -{ - "title": "ARRAY_PUSHFRONT", - "language": "en" -} ---- - - - -## array_pushfront - - - -array_pushfront - - - -### description - -#### Syntax - -`Array array_pushfront(Array arr, T value)` - -Add the value to the beginning of the array. - -#### Returned value - -The array after adding the value. - -Type: Array. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select array_pushfront([1, 2], 3); -+---------------------------------+ -| array_pushfront(ARRAY(1, 2), 3) | -+---------------------------------+ -| [3, 1, 2] | -+---------------------------------+ - -mysql> select col3, array_pushfront(col3, 6) from array_test; -+-----------+----------------------------+ -| col3 | array_pushfront(`col3`, 6) | -+-----------+----------------------------+ -| [3, 4, 5] | [6, 3, 4, 5] | -| [NULL] | [6, NULL] | -| NULL | NULL | -| [] | [6] | -+-----------+----------------------------+ - -mysql> select col1, col3, array_pushfront(col3, col1) from array_test; -+------+-----------+---------------------------------+ -| col1 | col3 | array_pushfront(`col3`, `col1`) | -+------+-----------+---------------------------------+ -| 0 | [3, 4, 5] | [0, 3, 4, 5] | -| 1 | [NULL] | [1, NULL] | -| 2 | NULL | NULL | -| 3 | [] | [3] | -+------+-----------+---------------------------------+ -``` - -### keywords - -ARRAY,PUSHFRONT,ARRAY_PUSHFRONT \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-range.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-range.md deleted file mode 100644 index c8119a9defe560..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-range.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -{ - "title": "ARRAY_RANGE", - "language": "en" -} ---- - - - -## array_range - - - -array_range - - - -### description - -#### Syntax - -```sql -ARRAY array_range(Int end) -ARRAY array_range(Int start, Int end) -ARRAY array_range(Int start, Int end, Int step) -ARRAY array_range(Datetime start_datetime, Datetime end_datetime) -ARRAY array_range(Datetime start_datetime, Datetime end_datetime, INTERVAL Int interval_step UNIT) -``` -1. To generate array of int: -The parameters are all positive integers. -start default value is 0, and step default value is 1. 
-Return the array which numbers from start to end - 1 by step. - -2. To generate array of datetime: -At least taking two parameters. -The first two parameters are all datetimev2, the third is positive integer. -If the third part is missing, `INTERVAL 1 DAY` will be default value. -UNIT supports YEAR/MONTH/WEEK/DAY/HOUR/MINUTE/SECOND. -Return the array of datetimev2 between start_datetime and closest to end_datetime by interval_step UNIT. - -### notice - -`if the 3rd parameter step/interval_step is negative or zero, the function will return NULL` - -### example - -``` -mysql> select array_range(10); -+--------------------------------+ -| array_range(10) | -+--------------------------------+ -| [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] | -+--------------------------------+ - -mysql> select array_range(10,20); -+------------------------------------------+ -| array_range(10, 20) | -+------------------------------------------+ -| [10, 11, 12, 13, 14, 15, 16, 17, 18, 19] | -+------------------------------------------+ - -mysql> select array_range(0,20,2); -+-------------------------------------+ -| array_range(0, 20, 2) | -+-------------------------------------+ -| [0, 2, 4, 6, 8, 10, 12, 14, 16, 18] | -+-------------------------------------+ - -mysql> select array_range(cast('2022-05-15 12:00:00' as datetimev2(0)), cast('2022-05-17 12:00:00' as datetimev2(0))) AS array_range_default; -+------------------------------------------------+ -| array_range_default | -+------------------------------------------------+ -| ["2022-05-15 12:00:00", "2022-05-16 12:00:00"] | -+------------------------------------------------+ - -mysql> select array_range(cast('2019-05-15 12:00:00' as datetimev2(0)), cast('2022-05-17 12:00:00' as datetimev2(0)), interval 2 year) as array_range_2_year; -+------------------------------------------------+ -| array_range_2_year | -+------------------------------------------------+ -| ["2019-05-15 12:00:00", "2021-05-15 12:00:00"] | 
-+------------------------------------------------+ -``` - -### keywords - -ARRAY, RANGE, ARRAY_RANGE diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-remove.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-remove.md deleted file mode 100644 index e1a684211e0932..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-remove.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -{ - "title": "ARRAY_REMOVE", - "language": "en" -} ---- - - - -## array_remove - - - -array_remove - - - -### description - -#### Syntax - -`ARRAY array_remove(ARRAY arr, T val)` - -Remove all elements that equal to element from array. - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select array_remove(['test', NULL, 'value'], 'value'); -+-----------------------------------------------------+ -| array_remove(ARRAY('test', NULL, 'value'), 'value') | -+-----------------------------------------------------+ -| [test, NULL] | -+-----------------------------------------------------+ - -mysql> select k1, k2, array_remove(k2, 1) from array_type_table_1; -+------+--------------------+-----------------------+ -| k1 | k2 | array_remove(`k2`, 1) | -+------+--------------------+-----------------------+ -| 1 | [1, 2, 3] | [2, 3] | -| 2 | [1, 3] | [3] | -| 3 | NULL | NULL | -| 4 | [1, 3] | [3] | -| 5 | [NULL, 1, NULL, 2] | [NULL, NULL, 2] | -+------+--------------------+-----------------------+ - -mysql> select k1, k2, array_remove(k2, k1) from array_type_table_1; -+------+--------------------+--------------------------+ -| k1 | k2 | array_remove(`k2`, `k1`) | -+------+--------------------+--------------------------+ -| 1 | [1, 2, 3] | [2, 3] | -| 2 | [1, 3] | [1, 3] | -| 3 | NULL | NULL | -| 4 | [1, 3] | [1, 3] | -| 5 | [NULL, 1, NULL, 2] | [NULL, 1, NULL, 2] | -+------+--------------------+--------------------------+ - -mysql> select k1, k2, array_remove(k2, 
date('2022-10-10')) from array_type_table_date; -+------+--------------------------+-------------------------------------------------+ -| k1 | k2 | array_remove(`k2`, date('2022-10-10 00:00:00')) | -+------+--------------------------+-------------------------------------------------+ -| 1 | [2021-10-10, 2022-10-10] | [2021-10-10] | -| 2 | [NULL, 2022-05-14] | [NULL, 2022-05-14] | -+------+--------------------------+-------------------------------------------------+ - -mysql> select k1, k2, array_remove(k2, k1) from array_type_table_nullable; -+------+-----------+--------------------------+ -| k1 | k2 | array_remove(`k2`, `k1`) | -+------+-----------+--------------------------+ -| NULL | [1, 2, 3] | NULL | -| 1 | NULL | NULL | -| NULL | [NULL, 1] | NULL | -| 1 | [NULL, 1] | [NULL] | -+------+-----------+--------------------------+ - -``` - -### keywords - -ARRAY,REMOVE,ARRAY_REMOVE - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-reverse-sort.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-reverse-sort.md deleted file mode 100644 index 8610dc5d231561..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-reverse-sort.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -{ - "title": "ARRAY_REVERSE_SORT", - "language": "en" -} ---- - - - -## array_reverse_sort - - - -array_reverse_sort - - - -### description - -#### Syntax - -`ARRAY array_reverse_sort(ARRAY arr)` - -Return the array which has been sorted in descending order. Return NULL for NULL input. -If the element of array is NULL, it will be placed in the last of the sorted array. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; -mysql> select k1, k2, array_reverse_sort(k2) from array_test; -+------+-----------------------------+-----------------------------+ -| k1 | k2 | array_reverse_sort(`k2`) | -+------+-----------------------------+-----------------------------+ -| 1 | [1, 2, 3, 4, 5] | [5, 4, 3, 2, 1] | -| 2 | [6, 7, 8] | [8, 7, 6] | -| 3 | [] | [] | -| 4 | NULL | NULL | -| 5 | [1, 2, 3, 4, 5, 4, 3, 2, 1] | [5, 4, 4, 3, 3, 2, 2, 1, 1] | -| 6 | [1, 2, 3, NULL] | [3, 2, 1, NULL] | -| 7 | [1, 2, 3, NULL, NULL] | [3, 2, 1, NULL, NULL] | -| 8 | [1, 1, 2, NULL, NULL] | [2, 1, 1, NULL, NULL] | -| 9 | [1, NULL, 1, 2, NULL, NULL] | [2, 1, 1, NULL, NULL, NULL] | -+------+-----------------------------+-----------------------------+ - -mysql> select k1, k2, array_reverse_sort(k2) from array_test01; -+------+------------------------------------------+------------------------------------------+ -| k1 | k2 | array_reverse_sort(`k2`) | -+------+------------------------------------------+------------------------------------------+ -| 1 | ['a', 'b', 'c', 'd', 'e'] | ['e', 'd', 'c', 'b', 'a'] | -| 2 | ['f', 'g', 'h'] | ['h', 'g', 'f'] | -| 3 | [''] | [''] | -| 3 | [NULL] | [NULL] | -| 5 | ['a', 'b', 'c', 'd', 'e', 'a', 'b', 'c'] | ['e', 'd', 'c', 'c', 'b', 'b', 'a', 'a'] | -| 6 | NULL | NULL | -| 7 | ['a', 'b', NULL] | ['b', 'a', NULL] | -| 8 | ['a', 'b', NULL, NULL] | ['b', 'a', NULL, NULL] | -+------+------------------------------------------+------------------------------------------+ -``` - -### keywords - -ARRAY, SORT, REVERSE, ARRAY_SORT, ARRAY_REVERSE_SORT diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-shuffle.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-shuffle.md deleted file mode 100644 index bc29200c830a47..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-shuffle.md +++ /dev/null @@ -1,85 +0,0 @@ 
---- -{ - "title": "ARRAY_SHUFFLE", - "language": "en" -} ---- - - - -## array_shuffle - - - -array_shuffle -shuffle - - - -### description - -#### Syntax - -```sql -ARRAY array_shuffle(ARRAY array1, [INT seed]) -ARRAY shuffle(ARRAY array1, [INT seed]) -``` - -Randomly arrange the elements in the array. Among them, the parameter array1 is the array to be randomly arranged, and the optional parameter seed is to set the initial value used by the pseudo-random number generator to generate pseudo-random numbers. -Shuffle has the same function as array_shuffle. - -``` -array_shuffle(array1); -array_shuffle(array1, 0); -shuffle(array1); -shuffle(array1, 0); -``` - -### example - -```sql - -mysql [test]> select c_array1, array_shuffle(c_array1) from array_test; -+-----------------------+---------------------------+ -| c_array1 | array_shuffle(`c_array1`) | -+-----------------------+---------------------------+ -| [1, 2, 3, 4, 5, NULL] | [2, NULL, 5, 3, 4, 1] | -| [6, 7, 8, NULL] | [7, NULL, 8, 6] | -| [1, NULL] | [1, NULL] | -| NULL | NULL | -+-----------------------+---------------------------+ -4 rows in set (0.01 sec) - -MySQL [test]> select c_array1, array_shuffle(c_array1, 0) from array_test; -+-----------------------+------------------------------+ -| c_array1 | array_shuffle(`c_array1`, 0) | -+-----------------------+------------------------------+ -| [1, 2, 3, 4, 5, NULL] | [1, 3, 2, NULL, 4, 5] | -| [6, 7, 8, NULL] | [6, 8, 7, NULL] | -| [1, NULL] | [1, NULL] | -| NULL | NULL | -+-----------------------+------------------------------+ -4 rows in set (0.01 sec) - -``` - -### keywords - -ARRAY,ARRAY_SHUFFLE,SHUFFLE diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-size.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-size.md deleted file mode 100644 index fc254f4344a017..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-size.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -{ - "title": "ARRAY_SIZE", - 
"language": "en" -} ---- - - - -## array_size (size, cardinality) - - - -array_size (size, cardinality) - - - -### description - -#### Syntax - -```sql -BIGINT size(ARRAY arr) -BIGINT array_size(ARRAY arr) -BIGINT cardinality(ARRAY arr) -``` - -Returns the size of the array, returns NULL for NULL input. - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select k1,k2,size(k2) from array_test; -+------+-----------+------------+ -| k1 | k2 | size(`k2`) | -+------+-----------+------------+ -| 1 | [1, 2, 3] | 3 | -| 2 | [] | 0 | -| 3 | NULL | NULL | -+------+-----------+------------+ - -mysql> select k1,k2,array_size(k2) from array_test; -+------+-----------+------------------+ -| k1 | k2 | array_size(`k2`) | -+------+-----------+------------------+ -| 1 | [1, 2, 3] | 3 | -| 2 | [] | 0 | -| 3 | NULL | NULL | -+------+-----------+------------------+ - -mysql> select k1,k2,cardinality(k2) from array_test; -+------+-----------+-------------------+ -| k1 | k2 | cardinality(`k2`) | -+------+-----------+-------------------+ -| 1 | [1, 2, 3] | 3 | -| 2 | [] | 0 | -| 3 | NULL | NULL | -+------+-----------+-------------------+ - -``` - -### keywords - -ARRAY_SIZE, SIZE, CARDINALITY - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-slice.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-slice.md deleted file mode 100644 index 20149ea0a05d3d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-slice.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -{ - "title": "ARRAY_SLICE", - "language": "en" -} ---- - - - -## array_slice - - - -array_slice - - - -### description - -#### Syntax - -`ARRAY array_slice(ARRAY arr, BIGINT off, BIGINT len)` - -Returns a slice of the array. - -``` -A positive off indicates an indent on the left -A negative off indicates an indent on the right. 
-An empty array is returned when the off is not within the actual range of the array. -A negative len will be treated as 0. -``` - -### notice - -`Only supported in vectorized engine` - -### example - - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select k2, k2[2:2] from array_type_table_nullable; -+-----------------+-------------------------+ -| k2 | array_slice(`k2`, 2, 2) | -+-----------------+-------------------------+ -| [1, 2, 3] | [2, 3] | -| [1, NULL, 3] | [NULL, 3] | -| [2, 3] | [3] | -| NULL | NULL | -+-----------------+-------------------------+ - -mysql> select k2, array_slice(k2, 2, 2) from array_type_table_nullable; -+-----------------+-------------------------+ -| k2 | array_slice(`k2`, 2, 2) | -+-----------------+-------------------------+ -| [1, 2, 3] | [2, 3] | -| [1, NULL, 3] | [NULL, 3] | -| [2, 3] | [3] | -| NULL | NULL | -+-----------------+-------------------------+ - -mysql> select k2, k2[2:2] from array_type_table_nullable_varchar; -+----------------------------+-------------------------+ -| k2 | array_slice(`k2`, 2, 2) | -+----------------------------+-------------------------+ -| ['hello', 'world', 'c++'] | ['world', 'c++'] | -| ['a1', 'equals', 'b1'] | ['equals', 'b1'] | -| ['hasnull', NULL, 'value'] | [NULL, 'value'] | -| ['hasnull', NULL, 'value'] | [NULL, 'value'] | -+----------------------------+-------------------------+ - -mysql> select k2, array_slice(k2, 2, 2) from array_type_table_nullable_varchar; -+----------------------------+-------------------------+ -| k2 | array_slice(`k2`, 2, 2) | -+----------------------------+-------------------------+ -| ['hello', 'world', 'c++'] | ['world', 'c++'] | -| ['a1', 'equals', 'b1'] | ['equals', 'b1'] | -| ['hasnull', NULL, 'value'] | [NULL, 'value'] | -| ['hasnull', NULL, 'value'] | [NULL, 'value'] | -+----------------------------+-------------------------+ -``` - -Negative off: - -``` -mysql> select k2, k2[-2:1] from array_type_table_nullable; 
-+-----------+--------------------------+ -| k2 | array_slice(`k2`, -2, 1) | -+-----------+--------------------------+ -| [1, 2, 3] | [2] | -| [1, 2, 3] | [2] | -| [2, 3] | [2] | -| [2, 3] | [2] | -+-----------+--------------------------+ - -mysql> select k2, array_slice(k2, -2, 1) from array_type_table_nullable; -+-----------+--------------------------+ -| k2 | array_slice(`k2`, -2, 1) | -+-----------+--------------------------+ -| [1, 2, 3] | [2] | -| [1, 2, 3] | [2] | -| [2, 3] | [2] | -| [2, 3] | [2] | -+-----------+--------------------------+ - -mysql> select k2, k2[-2:2] from array_type_table_nullable_varchar; -+----------------------------+--------------------------+ -| k2 | array_slice(`k2`, -2, 2) | -+----------------------------+--------------------------+ -| ['hello', 'world', 'c++'] | ['world', 'c++'] | -| ['a1', 'equals', 'b1'] | ['equals', 'b1'] | -| ['hasnull', NULL, 'value'] | [NULL, 'value'] | -| ['hasnull', NULL, 'value'] | [NULL, 'value'] | -+----------------------------+--------------------------+ - -mysql> select k2, array_slice(k2, -2, 2) from array_type_table_nullable_varchar; -+----------------------------+--------------------------+ -| k2 | array_slice(`k2`, -2, 2) | -+----------------------------+--------------------------+ -| ['hello', 'world', 'c++'] | ['world', 'c++'] | -| ['a1', 'equals', 'b1'] | ['equals', 'b1'] | -| ['hasnull', NULL, 'value'] | [NULL, 'value'] | -| ['hasnull', NULL, 'value'] | [NULL, 'value'] | -+----------------------------+--------------------------+ -``` - -``` -mysql> select k2, array_slice(k2, 0) from array_type_table; -+-----------+-------------------------+ -| k2 | array_slice(`k2`, 0) | -+-----------+-------------------------+ -| [1, 2, 3] | [] | -+-----------+-------------------------+ - -mysql> select k2, array_slice(k2, -5) from array_type_table; -+-----------+----------------------+ -| k2 | array_slice(`k2`, -5) | -+-----------+----------------------+ -| [1, 2, 3] | [] | 
-+-----------+----------------------+ -``` - -### keywords - -ARRAY,SLICE,ARRAY_SLICE - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-sort.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-sort.md deleted file mode 100644 index 821d7ae6390a1c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-sort.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -{ - "title": "ARRAY_SORT", - "language": "en" -} ---- - - - -## array_sort - - - -array_sort - - - -### description - -#### Syntax - -`ARRAY array_sort(ARRAY arr)` - -Return the array which has been sorted in ascending order. Return NULL for NULL input. -If the element of array is NULL, it will be placed in the front of the sorted array. - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; -mysql> select k1, k2, array_sort(k2) from array_test; -+------+-----------------------------+-----------------------------+ -| k1 | k2 | array_sort(`k2`) | -+------+-----------------------------+-----------------------------+ -| 1 | [1, 2, 3, 4, 5] | [1, 2, 3, 4, 5] | -| 2 | [6, 7, 8] | [6, 7, 8] | -| 3 | [] | [] | -| 4 | NULL | NULL | -| 5 | [1, 2, 3, 4, 5, 4, 3, 2, 1] | [1, 1, 2, 2, 3, 3, 4, 4, 5] | -| 6 | [1, 2, 3, NULL] | [NULL, 1, 2, 3] | -| 7 | [1, 2, 3, NULL, NULL] | [NULL, NULL, 1, 2, 3] | -| 8 | [1, 1, 2, NULL, NULL] | [NULL, NULL, 1, 1, 2] | -| 9 | [1, NULL, 1, 2, NULL, NULL] | [NULL, NULL, NULL, 1, 1, 2] | -+------+-----------------------------+-----------------------------+ - -mysql> select k1, k2, array_sort(k2) from array_test01; -+------+------------------------------------------+------------------------------------------+ -| k1 | k2 | array_sort(`k2`) | -+------+------------------------------------------+------------------------------------------+ -| 1 | ['a', 'b', 'c', 'd', 'e'] | ['a', 'b', 'c', 'd', 'e'] | -| 2 | ['f', 'g', 'h'] | ['f', 'g', 'h'] | -| 3 | [''] | [''] | -| 3 | [NULL] | 
[NULL] | -| 5 | ['a', 'b', 'c', 'd', 'e', 'a', 'b', 'c'] | ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'e'] | -| 6 | NULL | NULL | -| 7 | ['a', 'b', NULL] | [NULL, 'a', 'b'] | -| 8 | ['a', 'b', NULL, NULL] | [NULL, NULL, 'a', 'b'] | -+------+------------------------------------------+------------------------------------------+ -``` - -### keywords - -ARRAY, SORT, ARRAY_SORT - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-sortby.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-sortby.md deleted file mode 100644 index 6f42dac111a0a9..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-sortby.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -{ - "title": "ARRAY_SORTBY", - "language": "en" -} ---- - - - -## array_sortby - - - -array_sortby - - - -### description - -#### Syntax - -```sql -ARRAY array_sortby(ARRAY src,Array key) -ARRAY array_sortby(lambda,array....) -``` -First, arrange the key column in ascending order, and then return the corresponding column of the src column sorted in this order as the result; -Returns NULL if the input array src is NULL. -If the input array key is NULL, the order in which src is returned remains unchanged. -If the input array key element contains NULL, the output sorted array will place NULL first. 
- -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql [test]>select array_sortby(['a','b','c'],[3,2,1]); -+----------------------------------------------------+ -| array_sortby(ARRAY('a', 'b', 'c'), ARRAY(3, 2, 1)) | -+----------------------------------------------------+ -| ['c', 'b', 'a'] | -+----------------------------------------------------+ - -mysql [test]>select array_sortby([1,2,3,4,5],[10,5,1,20,80]); -+-------------------------------------------------------------+ -| array_sortby(ARRAY(1, 2, 3, 4, 5), ARRAY(10, 5, 1, 20, 80)) | -+-------------------------------------------------------------+ -| [3, 2, 1, 4, 5] | -+-------------------------------------------------------------+ - -mysql [test]>select *,array_sortby(c_array1,c_array2) from test_array_sortby order by id; -+------+-----------------+-------------------------+--------------------------------------+ -| id | c_array1 | c_array2 | array_sortby(`c_array1`, `c_array2`) | -+------+-----------------+-------------------------+--------------------------------------+ -| 0 | NULL | [2] | NULL | -| 1 | [1, 2, 3, 4, 5] | [10, 20, -40, 80, -100] | [5, 3, 1, 2, 4] | -| 2 | [6, 7, 8] | [10, 12, 13] | [6, 7, 8] | -| 3 | [1] | [-100] | [1] | -| 4 | NULL | NULL | NULL | -| 5 | [3] | NULL | [3] | -| 6 | [1, 2] | [2, 1] | [2, 1] | -| 7 | [NULL] | [NULL] | [NULL] | -| 8 | [1, 2, 3] | [3, 2, 1] | [3, 2, 1] | -+------+-----------------+-------------------------+--------------------------------------+ - -mysql [test]>select *, array_map((x,y)->(y+x),c_array1,c_array2) as arr_sum,array_sortby((x,y)->(y+x),c_array1,c_array2) as arr_sort from array_test2; -+------+-----------------+--------------+----------------+-----------------+ -| id | c_array1 | c_array2 | arr_sum | arr_sort | -+------+-----------------+--------------+----------------+-----------------+ -| 1 | [1, 2, 3] | [10, 11, 12] | [11, 13, 15] | [1, 2, 3] | -| 2 | [4, 3, 5] | [10, 20, 30] | [14, 23, 35] | [4, 3, 5] | -| 3 | [-40, 
30, -100] | [30, 10, 20] | [-10, 40, -80] | [-100, -40, 30] | -+------+-----------------+--------------+----------------+-----------------+ -``` - -### keywords - -ARRAY, SORT, ARRAY_SORTBY - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-sum.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-sum.md deleted file mode 100644 index f4597a062edb68..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-sum.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -{ - "title": "ARRAY_SUM", - "language": "en" -} ---- - - - -## array_sum - - -array_sum - - - -### description - -#### Syntax - -```sql -T array_sum(ARRAY src, Array key) -T array_sum(lambda, Array arr1, Array arr2 ....) -``` - -Get the sum of all elements in an array (`NULL` values are skipped). -When the array is empty or all elements in the array are `NULL` values, the function returns `NULL`. - -### example - -```shell -mysql> create table array_type_table(k1 INT, k2 Array) duplicate key (k1) - -> distributed by hash(k1) buckets 1 properties('replication_num' = '1'); -mysql> insert into array_type_table values (0, []), (1, [NULL]), (2, [1, 2, 3]), (3, [1, NULL, 3]); -mysql> set enable_vectorized_engine = true; # enable vectorized engine -mysql> select k2, array_sum(k2) from array_type_table; -+--------------+-----------------+ -| k2 | array_sum(`k2`) | -+--------------+-----------------+ -| [] | NULL | -| [NULL] | NULL | -| [1, 2, 3] | 6 | -| [1, NULL, 3] | 4 | -+--------------+-----------------+ -4 rows in set (0.01 sec) - -``` - -### keywords - -ARRAY,SUM,ARRAY_SUM - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-union.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-union.md deleted file mode 100644 index 60a3ed3764ff70..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-union.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -{ - "title": "ARRAY_UNION", - "language": "en" -} ---- - - - -## 
array_union - - - -array_union - - - -### description - -#### Syntax - -`ARRAY array_union(ARRAY array1, ARRAY array2)` - -Returns an array of the elements in the union of array1 and array2, without duplicates. If the input parameter is null, null is returned. - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select k1,k2,k3,array_union(k2,k3) from array_type_table; -+------+-----------------+--------------+-------------------------+ -| k1 | k2 | k3 | array_union(`k2`, `k3`) | -+------+-----------------+--------------+-------------------------+ -| 1 | [1, 2, 3] | [2, 4, 5] | [1, 2, 3, 4, 5] | -| 2 | [2, 3] | [1, 5] | [2, 3, 1, 5] | -| 3 | [1, 1, 1] | [2, 2, 2] | [1, 2] | -+------+-----------------+--------------+-------------------------+ - -mysql> select k1,k2,k3,array_union(k2,k3) from array_type_table_nullable; -+------+-----------------+--------------+-------------------------+ -| k1 | k2 | k3 | array_union(`k2`, `k3`) | -+------+-----------------+--------------+-------------------------+ -| 1 | [1, NULL, 3] | [1, 3, 5] | [1, NULL, 3, 5] | -| 2 | [NULL, NULL, 2] | [2, NULL, 4] | [NULL, 2, 4] | -| 3 | NULL | [1, 2, 3] | NULL | -+------+-----------------+--------------+-------------------------+ - -mysql> select k1,k2,k3,array_union(k2,k3) from array_type_table_varchar; -+------+----------------------------+----------------------------------+---------------------------------------------------+ -| k1 | k2 | k3 | array_union(`k2`, `k3`) | -+------+----------------------------+----------------------------------+---------------------------------------------------+ -| 1 | ['hello', 'world', 'c++'] | ['I', 'am', 'c++'] | ['hello', 'world', 'c++', 'I', 'am'] | -| 2 | ['a1', 'equals', 'b1'] | ['a2', 'equals', 'b2'] | ['a1', 'equals', 'b1', 'a2', 'b2'] | -| 3 | ['hasnull', NULL, 'value'] | ['nohasnull', 'nonull', 'value'] | ['hasnull', NULL, 'value', 'nohasnull', 'nonull'] | -| 4 | ['hasnull', 
NULL, 'value'] | ['hasnull', NULL, 'value'] | ['hasnull', NULL, 'value'] | -+------+----------------------------+----------------------------------+---------------------------------------------------+ - -mysql> select k1,k2,k3,array_union(k2,k3) from array_type_table_decimal; -+------+------------------+-------------------+----------------------------+ -| k1 | k2 | k3 | array_union(`k2`, `k3`) | -+------+------------------+-------------------+----------------------------+ -| 1 | [1.1, 2.1, 3.44] | [2.1, 3.4, 5.4] | [1.1, 2.1, 3.44, 3.4, 5.4] | -| 2 | [NULL, 2, 5] | [NULL, NULL, 5.4] | [NULL, 2, 5, 5.4] | -| 4 | [1, NULL, 2, 5] | [1, 3.1, 5.4] | [1, NULL, 2, 5, 3.1, 5.4] | -+------+------------------+-------------------+----------------------------+ - -``` - -### keywords - -ARRAY,UNION,ARRAY_UNION - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-with-constant.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-with-constant.md deleted file mode 100644 index c142c48617ad24..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-with-constant.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -{ - "title": "ARRAY_WITH_CONSTANT", - "language": "en" -} ---- - - - -## array_with_constant - - - -array_with_constant - - - -### description - -#### Syntax - -```sql -ARRAY array_with_constant(n, T) -ARRAY array_repeat(T, n) -``` - -get array of constants with n length, array_repeat has the same function as array_with_constant and is used to be compatible with the hive syntax format -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select array_with_constant(2, "hello"), array_repeat("hello", 2); -+---------------------------------+--------------------------+ -| array_with_constant(2, 'hello') | array_repeat('hello', 2) | -+---------------------------------+--------------------------+ -| ['hello', 'hello'] | ['hello', 'hello'] | 
-+---------------------------------+--------------------------+ -1 row in set (0.04 sec) - -mysql> select array_with_constant(3, 12345), array_repeat(12345, 3); -+-------------------------------+------------------------+ -| array_with_constant(3, 12345) | array_repeat(12345, 3) | -+-------------------------------+------------------------+ -| [12345, 12345, 12345] | [12345, 12345, 12345] | -+-------------------------------+------------------------+ -1 row in set (0.01 sec) - -mysql> select array_with_constant(3, null), array_repeat(null, 3); -+------------------------------+-----------------------+ -| array_with_constant(3, NULL) | array_repeat(NULL, 3) | -+------------------------------+-----------------------+ -| [NULL, NULL, NULL] | [NULL, NULL, NULL] | -+------------------------------+-----------------------+ -1 row in set (0.01 sec) - -mysql> select array_with_constant(null, 3), array_repeat(3, null); -+------------------------------+-----------------------+ -| array_with_constant(NULL, 3) | array_repeat(3, NULL) | -+------------------------------+-----------------------+ -| [] | [] | -+------------------------------+-----------------------+ -1 row in set (0.01 sec) - -``` - -### keywords - -ARRAY,WITH_CONSTANT,ARRAY_WITH_CONSTANT,ARRAY_REPEAT diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array-zip.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array-zip.md deleted file mode 100644 index 801c39aaad64d2..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array-zip.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -{ - "title": "ARRAY_ZIP", - "language": "en" -} ---- - - - -## array_zip - - - -array_zip - - - -### description - -Combines all all arrays into a single array. The resulting array contains the corresponding elements of the source arrays grouped into structs in the listed order of arguments. 
- -#### Syntax - -`Array> array_zip(Array, Array, ...)` - -#### Returned value - -Array with elements from the source arrays grouped into tuples. Data types in the tuple are the same as types of the input arrays and in the same order as arrays are passed. - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select array_zip(['a', 'b', 'c'], [1, 2, 3]); -+-------------------------------------------------+ -| array_zip(ARRAY('a', 'b', 'c'), ARRAY(1, 2, 3)) | -+-------------------------------------------------+ -| [{'a', 1}, {'b', 2}, {'c', 3}] | -+-------------------------------------------------+ -1 row in set (0.01 sec) -``` - -### keywords - -ARRAY,ZIP,ARRAY_ZIP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/array.md b/docs/en/docs/sql-manual/sql-functions/array-functions/array.md deleted file mode 100644 index 8762e04e2af7cc..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/array.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -{ - "title": "ARRAY", - "language": "en" -} ---- - - - -## array() - - - -array() - - - -### description - -#### Syntax - -`ARRAY array(T, ...)` - -construct an array with variadic elements and return it, T could be column or literal - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select array("1", 2, 1.1); -+----------------------+ -| array('1', 2, '1.1') | -+----------------------+ -| ['1', '2', '1.1'] | -+----------------------+ -1 row in set (0.00 sec) - - -mysql> select array(null, 1); -+----------------+ -| array(NULL, 1) | -+----------------+ -| [NULL, 1] | -+----------------+ -1 row in set (0.00 sec) - -mysql> select array(1, 2, 3); -+----------------+ -| array(1, 2, 3) | -+----------------+ -| [1, 2, 3] | -+----------------+ -1 row in set (0.00 sec) - -mysql> select array(qid, creationDate, null) from nested limit 4; 
-+------------------------------------+ -| array(`qid`, `creationDate`, NULL) | -+------------------------------------+ -| [1000038, 20090616074056, NULL] | -| [1000069, 20090616075005, NULL] | -| [1000130, 20090616080918, NULL] | -| [1000145, 20090616081545, NULL] | -+------------------------------------+ -4 rows in set (0.01 sec) -``` - -### keywords - -ARRAY,ARRAY,CONSTRUCTOR - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/arrays-overlap.md b/docs/en/docs/sql-manual/sql-functions/array-functions/arrays-overlap.md deleted file mode 100644 index 9762e257f79a55..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/arrays-overlap.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -{ - "title": "ARRAYS_OVERLAP", - "language": "en" -} ---- - - - - -## arrays_overlap - - - -arrays_overlap - - - -### description - -#### Syntax - -`BOOLEAN arrays_overlap(ARRAY left, ARRAY right)` - -Check if there is any common element for left and right array. Return below values: - -``` -1 - if any common element inside left and right array; -0 - if no common element inside left and right array; -NULL - when left or right array is NULL; OR any element inside left and right array is NULL; -``` - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select c_left,c_right,arrays_overlap(c_left,c_right) from array_test; -+--------------+-----------+-------------------------------------+ -| c_left | c_right | arrays_overlap(`c_left`, `c_right`) | -+--------------+-----------+-------------------------------------+ -| [1, 2, 3] | [3, 4, 5] | 1 | -| [1, 2, 3] | [5, 6] | 0 | -| [1, 2, NULL] | [1] | NULL | -| NULL | [1, 2] | NULL | -| [1, 2, 3] | [1, 2] | 1 | -+--------------+-----------+-------------------------------------+ -``` - -### keywords - -ARRAY,ARRAYS,OVERLAP,ARRAYS_OVERLAP diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/countequal.md 
b/docs/en/docs/sql-manual/sql-functions/array-functions/countequal.md deleted file mode 100644 index 2d5876b50ac181..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/countequal.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -{ - "title": "COUNTEQUAL", - "language": "en" -} ---- - - - -## countequal - - - -countequal - - - -### description - -#### Syntax - -`BIGINT countequal(ARRAY arr, T value)` - -Returns a number of the `value` in the given array. - -``` -num - how many the value number in array; -0 - if value does not present in the array; -NULL - when array is NULL. -``` - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> set enable_vectorized_engine=true; - -mysql> select *, countEqual(c_array,5) from array_test; -+------+-----------------+--------------------------+ -| id | c_array | countequal(`c_array`, 5) | -+------+-----------------+--------------------------+ -| 1 | [1, 2, 3, 4, 5] | 1 | -| 2 | [6, 7, 8] | 0 | -| 3 | [] | 0 | -| 4 | NULL | NULL | -+------+-----------------+--------------------------+ - -mysql> select *,countEqual(c_array, 1),countEqual(c_array, 5),countEqual(c_array, NULL) from array_test; -+------+-----------------------+--------------------------+--------------------------+-----------------------------+ -| id | c_array | countequal(`c_array`, 1) | countequal(`c_array`, 5) | countequal(`c_array`, NULL) | -+------+-----------------------+--------------------------+--------------------------+-----------------------------+ -| 1 | [1, 2, 3, 4, 5] | 1 | 1 | 0 | -| 2 | [6, 7, 8] | 0 | 0 | 0 | -| 3 | [] | 0 | 0 | 0 | -| 4 | NULL | NULL | NULL | NULL | -| 5 | [66, 77] | 0 | 0 | 0 | -| 5 | [66, 77] | 0 | 0 | 0 | -| 6 | NULL | NULL | NULL | NULL | -| 7 | [NULL, NULL, NULL] | 0 | 0 | 3 | -| 8 | [1, 2, 3, 4, 5, 5, 5] | 1 | 3 | 0 | -+------+-----------------------+--------------------------+--------------------------+-----------------------------+ -``` - -### keywords - -ARRAY,COUNTEQUAL, - diff 
--git a/docs/en/docs/sql-manual/sql-functions/array-functions/element-at.md b/docs/en/docs/sql-manual/sql-functions/array-functions/element-at.md deleted file mode 100644 index 706b37230fdab7..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/array-functions/element-at.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -{ - "title": "ELEMENT_AT", - "language": "en" -} ---- - - -## element_at - - - -element_at - - - -### description - -#### Syntax - -```sql -T element_at(ARRAY arr, BIGINT position) -T arr[position] -``` - -Returns an element of an array located at the input position. If there is no element at the position, return NULL. - -`position` is 1-based and support negative number. - -### notice - -`Only supported in vectorized engine` - -### example - -positive `position` example: - -``` -mysql> set enable_vectorized_engine=true; - -mysql> SELECT id,c_array,element_at(c_array, 5) FROM `array_test`; -+------+-----------------+--------------------------+ -| id | c_array | element_at(`c_array`, 5) | -+------+-----------------+--------------------------+ -| 1 | [1, 2, 3, 4, 5] | 5 | -| 2 | [6, 7, 8] | NULL | -| 3 | [] | NULL | -| 4 | NULL | NULL | -+------+-----------------+--------------------------+ -``` - -negative `position` example: - -``` -mysql> set enable_vectorized_engine=true; - -mysql> SELECT id,c_array,c_array[-2] FROM `array_test`; -+------+-----------------+----------------------------------+ -| id | c_array | %element_extract%(`c_array`, -2) | -+------+-----------------+----------------------------------+ -| 1 | [1, 2, 3, 4, 5] | 4 | -| 2 | [6, 7, 8] | 7 | -| 3 | [] | NULL | -| 4 | NULL | NULL | -+------+-----------------+----------------------------------+ -``` - -### keywords - -ELEMENT_AT, SUBSCRIPT - diff --git a/docs/en/docs/sql-manual/sql-functions/array-functions/sequence.md b/docs/en/docs/sql-manual/sql-functions/array-functions/sequence.md deleted file mode 100644 index b5e45413fa422a..00000000000000 --- 
a/docs/en/docs/sql-manual/sql-functions/array-functions/sequence.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -{ - "title": "SEQUENCE", - "language": "en" -} ---- - - - -## sequence - - - -sequence - - - -### description -alias of array_range function - -#### Syntax - -```sql -ARRAY sequence(Int end) -ARRAY sequence(Int start, Int end) -ARRAY sequence(Int start, Int end, Int step) -ARRAY sequence(Datetime start_datetime, Datetime end_datetime) -ARRAY sequence(Datetime start_datetime, Datetime end_datetime, INTERVAL Int interval_step UNIT) -``` -1. To generate array of int: -The parameters are all positive integers. -start default value is 0, and step default value is 1. -Return the array which numbers from start to end - 1 by step. - -2. To generate array of datetime: -At least taking two parameters. -The first two parameters are all datetimev2, the third is positive integer. -If the third part is missing, `INTERVAL 1 DAY` will be default value. -UNIT supports YEAR/MONTH/WEEK/DAY/HOUR/MINUTE/SECOND. -Return the array of datetimev2 between start_datetime and closest to end_datetime by interval_step UNIT. 
- -### notice - -`if the 3rd parameter step/interval_step is negative or zero, the function will return NULL` - -### example - -``` -mysql> select sequence(10); -+--------------------------------+ -| sequence(10) | -+--------------------------------+ -| [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] | -+--------------------------------+ - -mysql> select sequence(10,20); -+------------------------------------------+ -| sequence(10, 20) | -+------------------------------------------+ -| [10, 11, 12, 13, 14, 15, 16, 17, 18, 19] | -+------------------------------------------+ - -mysql> select sequence(0,20,2); -+-------------------------------------+ -| sequence(0, 20, 2) | -+-------------------------------------+ -| [0, 2, 4, 6, 8, 10, 12, 14, 16, 18] | -+-------------------------------------+ - -mysql> select sequence(cast('2022-05-15 12:00:00' as datetimev2(0)), cast('2022-05-17 12:00:00' as datetimev2(0))) AS sequence_default; -+------------------------------------------------+ -| sequence_default | -+------------------------------------------------+ -| ["2022-05-15 12:00:00", "2022-05-16 12:00:00"] | -+------------------------------------------------+ - -mysql> select sequence(cast('2019-05-15 12:00:00' as datetimev2(0)), cast('2022-05-17 12:00:00' as datetimev2(0)), interval 2 year) as sequence_2_year; -+------------------------------------------------+ -| sequence_2_year | -+------------------------------------------------+ -| ["2019-05-15 12:00:00", "2021-05-15 12:00:00"] | -+------------------------------------------------+ -``` - -### keywords - -ARRAY, RANGE, SEQUENCE diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and-count.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and-count.md deleted file mode 100644 index 14991554520dc0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and-count.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -{ - "title": "BITMAP_AND_COUNT", - "language": "en" -} ---- - - - 
-## bitmap_and_count -### description -#### Syntax - -`BigIntVal bitmap_and_count(BITMAP lhs, BITMAP rhs, ...)` - -Calculate the intersection of two or more input bitmaps and return the number of intersections. - -### example - -``` -MySQL> select bitmap_and_count(bitmap_from_string('1,2,3'),bitmap_empty()); -+---------------------------------------------------------------+ -| bitmap_and_count(bitmap_from_string('1,2,3'), bitmap_empty()) | -+---------------------------------------------------------------+ -| 0 | -+---------------------------------------------------------------+ - - -MySQL> select bitmap_and_count(bitmap_from_string('1,2,3'),bitmap_from_string('1,2,3')); -+----------------------------------------------------------------------------+ -| bitmap_and_count(bitmap_from_string('1,2,3'), bitmap_from_string('1,2,3')) | -+----------------------------------------------------------------------------+ -| 3 | -+----------------------------------------------------------------------------+ - -MySQL> select bitmap_and_count(bitmap_from_string('1,2,3'),bitmap_from_string('3,4,5')); -+----------------------------------------------------------------------------+ -| bitmap_and_count(bitmap_from_string('1,2,3'), bitmap_from_string('3,4,5')) | -+----------------------------------------------------------------------------+ -| 1 | -+----------------------------------------------------------------------------+ - -MySQL> select bitmap_and_count(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5')); -+-------------------------------------------------------------------------------------------------------------+ -| (bitmap_and_count(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'))) | -+-------------------------------------------------------------------------------------------------------------+ -| 2 | 
-+-------------------------------------------------------------------------------------------------------------+ - -MySQL> select bitmap_and_count(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'),bitmap_empty()); -+-----------------------------------------------------------------------------------------------------------------------------+ -| (bitmap_and_count(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'), bitmap_empty())) | -+-----------------------------------------------------------------------------------------------------------------------------+ -| 0 | -+-----------------------------------------------------------------------------------------------------------------------------+ - -MySQL> select bitmap_and_count(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'), NULL); -+-------------------------------------------------------------------------------------------------------------------+ -| (bitmap_and_count(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'), NULL)) | -+-------------------------------------------------------------------------------------------------------------------+ -| NULL | -+-------------------------------------------------------------------------------------------------------------------+ -``` - -### keywords - - BITMAP_AND_COUNT,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and-not-count.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and-not-count.md deleted file mode 100644 index 11695a9e4e2b06..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and-not-count.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "BITMAP_AND_NOT_COUNT,BITMAP_ANDNOT_COUNT", - "language": "en" -} ---- - - - -## bitmap_and_not_count,bitmap_andnot_count -### description -#### Syntax - -`BITMAP 
BITMAP_AND_NOT_COUNT(BITMAP lhs, BITMAP rhs)` - -Calculate the set after lhs minus intersection of two input bitmaps, return the new bitmap size. - - -### example - -``` -mysql> select bitmap_and_not_count(bitmap_from_string('1,2,3'),bitmap_from_string('3,4,5')) cnt; -+------+ -| cnt | -+------+ -| 2 | -+------+ -``` - -### keywords - - BITMAP_AND_NOT_COUNT,BITMAP_ANDNOT_COUNT,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and-not.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and-not.md deleted file mode 100644 index beee61656dcce2..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and-not.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -{ - "title": "BITMAP_AND_NOT,BITMAP_ANDNOT", - "language": "en" -} ---- - - - -## bitmap_and_not,bitmap_andnot -### description -#### Syntax - -`BITMAP BITMAP_AND_NOT(BITMAP lhs, BITMAP rhs)` - -Calculate the set after lhs minus intersection of two input bitmaps, return the new bitmap. 
- -### example - -``` -mysql> select bitmap_count(bitmap_and_not(bitmap_from_string('1,2,3'),bitmap_from_string('3,4,5'))) cnt; -+------+ -| cnt | -+------+ -| 2 | -+------+ - -mysql> select bitmap_to_string(bitmap_and_not(bitmap_from_string('1,2,3'),bitmap_from_string('3,4,5'))); -+--------------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_and_not(bitmap_from_string('1,2,3'), bitmap_from_string('3,4,5'))) | -+--------------------------------------------------------------------------------------------+ -| 1,2 | -+--------------------------------------------------------------------------------------------+ - -mysql> select bitmap_to_string(bitmap_and_not(bitmap_from_string('1,2,3'),bitmap_empty())) ; -+-------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_and_not(bitmap_from_string('1,2,3'), bitmap_empty())) | -+-------------------------------------------------------------------------------+ -| 1,2,3 | -+-------------------------------------------------------------------------------+ - -mysql> select bitmap_to_string(bitmap_and_not(bitmap_from_string('1,2,3'),NULL)); -+---------------------------------------------------------------------+ -| bitmap_to_string(bitmap_and_not(bitmap_from_string('1,2,3'), NULL)) | -+---------------------------------------------------------------------+ -| NULL | -+---------------------------------------------------------------------+ -``` - -### keywords - - BITMAP_AND_NOT,BITMAP_ANDNOT,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and.md deleted file mode 100644 index b638a31dcd55d0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-and.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -{ - "title": "BITMAP_AND", - "language": "en" -} ---- - - - -## bitmap_and -### description -#### Syntax - 
-`BITMAP BITMAP_AND(BITMAP lhs, BITMAP rhs, ...)` - -Compute intersection of two or more input bitmaps, return the new bitmap. - -### example - -``` -mysql> select bitmap_count(bitmap_and(to_bitmap(1), to_bitmap(2))) cnt; -+------+ -| cnt | -+------+ -| 0 | -+------+ - -mysql> select bitmap_to_string(bitmap_and(to_bitmap(1), to_bitmap(2))); -+----------------------------------------------------------+ -| bitmap_to_string(bitmap_and(to_bitmap(1), to_bitmap(2))) | -+----------------------------------------------------------+ -| | -+----------------------------------------------------------+ - -mysql> select bitmap_count(bitmap_and(to_bitmap(1), to_bitmap(1))) cnt; -+------+ -| cnt | -+------+ -| 1 | -+------+ - -MySQL> select bitmap_to_string(bitmap_and(to_bitmap(1), to_bitmap(1))); -+----------------------------------------------------------+ -| bitmap_to_string(bitmap_and(to_bitmap(1), to_bitmap(1))) | -+----------------------------------------------------------+ -| 1 | -+----------------------------------------------------------+ - -MySQL> select bitmap_to_string(bitmap_and(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'))); -+-----------------------------------------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_and(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'))) | -+-----------------------------------------------------------------------------------------------------------------------+ -| 1,2 | -+-----------------------------------------------------------------------------------------------------------------------+ - -MySQL> select bitmap_to_string(bitmap_and(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'),bitmap_empty())); -+---------------------------------------------------------------------------------------------------------------------------------------+ -| 
bitmap_to_string(bitmap_and(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'), bitmap_empty())) | -+---------------------------------------------------------------------------------------------------------------------------------------+ -| | -+---------------------------------------------------------------------------------------------------------------------------------------+ - -MySQL> select bitmap_to_string(bitmap_and(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'),NULL)); -+-----------------------------------------------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_and(bitmap_from_string('1,2,3'), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'), NULL)) | -+-----------------------------------------------------------------------------------------------------------------------------+ -| NULL | -+-----------------------------------------------------------------------------------------------------------------------------+ -``` - -### keywords - - BITMAP_AND,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-contains.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-contains.md deleted file mode 100644 index 01b8e665cb3207..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-contains.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "BITMAP_CONTAINS", - "language": "en" -} ---- - - - -## bitmap_contains -### description -#### Syntax - -`BOOLEAN BITMAP_CONTAINS(BITMAP bitmap, BIGINT input)` - -Calculates whether the input value is in the Bitmap column and returns a Boolean value. 
- -### example - -``` -mysql> select bitmap_contains(to_bitmap(1),2) cnt; -+------+ -| cnt | -+------+ -| 0 | -+------+ - -mysql> select bitmap_contains(to_bitmap(1),1) cnt; -+------+ -| cnt | -+------+ -| 1 | -+------+ -``` - -### keywords - - BITMAP_CONTAINS,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-count.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-count.md deleted file mode 100644 index fb90c995890e58..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-count.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "BITMAP_COUNT", - "language": "en" -} ---- - - - -## bitmap_count -### description -#### Syntax - -`BITMAP BITMAP_COUNT(BITMAP lhs)` - -Returns the number of input bitmaps. - -### example - -``` -mysql> select bitmap_count(to_bitmap(1)) cnt; -+------+ -| cnt | -+------+ -| 1 | -+------+ - -mysql> select bitmap_count(bitmap_and(to_bitmap(1), to_bitmap(1))) cnt; -+------+ -| cnt | -+------+ -| 1 | -+------+ - -``` - -### keywords - - BITMAP_COUNT diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-empty.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-empty.md deleted file mode 100644 index 291f926ec2195d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-empty.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -{ - "title": "BITMAP_EMPTY", - "language": "en" -} ---- - - - -## bitmap_empty -### description -#### Syntax - -`BITMAP BITMAP_EMPTY()` - -Return an empty bitmap. 
Mainly be used to supply default value for bitmap column when loading, e.g., - -``` -cat data | curl --location-trusted -u user:passwd -T - -H "columns: dt,page,v1,v2=bitmap_empty()" http://host:8410/api/test/testDb/_stream_load -``` - -### example - -``` -mysql> select bitmap_count(bitmap_empty()); -+------------------------------+ -| bitmap_count(bitmap_empty()) | -+------------------------------+ -| 0 | -+------------------------------+ - -mysql> select bitmap_to_string(bitmap_empty()); -+----------------------------------+ -| bitmap_to_string(bitmap_empty()) | -+----------------------------------+ -| | -+----------------------------------+ -``` - -### keywords - - BITMAP_EMPTY,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-from-array.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-from-array.md deleted file mode 100644 index 6ba524d46733c2..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-from-array.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "BITMAP_FROM_ARRAY", - "language": "en" -} ---- - - - -## bitmap_from_array - -### description -#### Syntax - -`BITMAP BITMAP_FROM_ARRAY(ARRAY input)` - -Convert a TINYINT/SMALLINT/INT/BIGINT array to a BITMAP -When the input field is illegal, the result returns NULL - -### example - -``` -mysql> select *, bitmap_to_string(bitmap_from_array(c_array)) from array_test; -+------+-----------------------+------------------------------------------------+ -| id | c_array | bitmap_to_string(bitmap_from_array(`c_array`)) | -+------+-----------------------+------------------------------------------------+ -| 1 | [NULL] | NULL | -| 2 | [1, 2, 3, NULL] | NULL | -| 2 | [1, 2, 3, -10] | NULL | -| 3 | [1, 2, 3, 4, 5, 6, 7] | 1,2,3,4,5,6,7 | -| 4 | [100, 200, 300, 300] | 100,200,300 | -+------+-----------------------+------------------------------------------------+ -5 rows in set (0.02 sec) -``` - -### keywords - - BITMAP_FROM_ARRAY,BITMAP 
diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-from-base64.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-from-base64.md deleted file mode 100644 index 4a60f9e5fc9d03..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-from-base64.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "BITMAP_FROM_BASE64", - "language": "en" -} ---- - - - -## bitmap_from_base64 - -### description -#### Syntax - -`BITMAP BITMAP_FROM_BASE64(VARCHAR input)` - -Convert a base64 string(result of function `bitmap_to_base64`) into a bitmap. If input string is invalid, return NULL. - -### example - -``` -mysql> select bitmap_to_string(bitmap_from_base64("AA==")); -+----------------------------------------------+ -| bitmap_to_string(bitmap_from_base64("AA==")) | -+----------------------------------------------+ -| | -+----------------------------------------------+ - -mysql> select bitmap_to_string(bitmap_from_base64("AQEAAAA=")); -+-----------------------------------+ -| bitmap_to_string(bitmap_from_base64("AQEAAAA=")) | -+-----------------------------------+ -| 1 | -+-----------------------------------+ - -mysql> select bitmap_to_string(bitmap_from_base64("AjowAAACAAAAAAAAAJgAAAAYAAAAGgAAAAEAf5Y=")); -+----------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_from_base64("AjowAAACAAAAAAAAAJgAAAAYAAAAGgAAAAEAf5Y=")) | -+----------------------------------------------------------------------------------+ -| 1,9999999 | -+----------------------------------------------------------------------------------+ -``` - -### keywords - - BITMAP_FROM_BASE64,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-from-string.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-from-string.md deleted file mode 100644 index 672306960b0b4e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-from-string.md 
+++ /dev/null @@ -1,63 +0,0 @@ ---- -{ - "title": "BITMAP_FROM_STRING", - "language": "en" -} ---- - - - -## bitmap_from_string - -### description -#### Syntax - -`BITMAP BITMAP_FROM_STRING(VARCHAR input)` - -Convert a string into a bitmap. The input string should be a comma separated unsigned bigint (ranging from 0 to 18446744073709551615). -For example: input string "0, 1, 2" will be converted to a Bitmap with bit 0, 1, 2 set. -If input string is invalid, return NULL. - -### example - -``` -mysql> select bitmap_to_string(bitmap_from_string("0, 1, 2")); -+-------------------------------------------------+ -| bitmap_to_string(bitmap_from_string('0, 1, 2')) | -+-------------------------------------------------+ -| 0,1,2 | -+-------------------------------------------------+ - -mysql> select bitmap_from_string("-1, 0, 1, 2"); -+-----------------------------------+ -| bitmap_from_string('-1, 0, 1, 2') | -+-----------------------------------+ -| NULL | -+-----------------------------------+ - -mysql> select bitmap_to_string(bitmap_from_string("0, 1, 18446744073709551615")); -+--------------------------------------------------------------------+ -| bitmap_to_string(bitmap_from_string('0, 1, 18446744073709551615')) | -+--------------------------------------------------------------------+ -| 0,1,18446744073709551615 | -+--------------------------------------------------------------------+ -``` - -### keywords - - BITMAP_FROM_STRING,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-has-all.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-has-all.md deleted file mode 100644 index 96168df3e1fb4e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-has-all.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "BITMAP_HAS_ALL", - "language": "en" -} ---- - - - -## bitmap_has_all -### description -#### Syntax - -`BOOLEAN BITMAP_HAS_ALL(BITMAP lhs, BITMAP rhs)` - -Returns true if the first bitmap 
contains all the elements of the second bitmap. -Returns true if the second bitmap contains an empty element. - -### example - -``` -mysql> select bitmap_has_all(bitmap_from_string("0, 1, 2"), bitmap_from_string("1, 2")); -+---------------------------------------------------------------------------+ -| bitmap_has_all(bitmap_from_string('0, 1, 2'), bitmap_from_string('1, 2')) | -+---------------------------------------------------------------------------+ -| 1 | -+---------------------------------------------------------------------------+ - -mysql> select bitmap_has_all(bitmap_empty(), bitmap_from_string("1, 2")); -+------------------------------------------------------------+ -| bitmap_has_all(bitmap_empty(), bitmap_from_string('1, 2')) | -+------------------------------------------------------------+ -| 0 | -+------------------------------------------------------------+ -``` - -### keywords - - BITMAP_HAS_ALL,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-has-any.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-has-any.md deleted file mode 100644 index fdb92fa014bce0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-has-any.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "BITMAP_HAS_ANY", - "language": "en" -} ---- - - - -## bitmap_has_any -### description -#### Syntax - -`BOOLEAN BITMAP_HAS_ANY(BITMAP lhs, BITMAP rhs)` - -Calculate whether there are intersecting elements in the two Bitmap columns. The return value is Boolean. 
- -### example - -``` -mysql> select bitmap_has_any(to_bitmap(1),to_bitmap(2)); -+--------------------------------------------+ -| bitmap_has_any(to_bitmap(1), to_bitmap(2)) | -+--------------------------------------------+ -| 0 | -+--------------------------------------------+ - -mysql> select bitmap_has_any(to_bitmap(1),to_bitmap(1)); -+--------------------------------------------+ -| bitmap_has_any(to_bitmap(1), to_bitmap(1)) | -+--------------------------------------------+ -| 1 | -+--------------------------------------------+ -``` - -### keywords - - BITMAP_HAS_ANY,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-hash.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-hash.md deleted file mode 100644 index d9ef0f82274bf7..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-hash.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -{ - "title": "BITMAP_HASH", - "language": "en" -} ---- - - - -## bitmap_hash - -### Name - -BITMAP_HASH - -### Description - -Calculating hash value for what your input and return a BITMAP which contain the hash value. MurMur3 is used for this function because it is high-performance with low collision rate. More important, the MurMur3 distribution is "simili-random"; the Chi-Square distribution test is used to prove it. By the way, Different hardware platforms and different SEED may change the result of MurMur3. For more information about its performance, see [Smhasher](http://rurban.github.io/smhasher/). - -#### Syntax - -`BITMAP BITMAP_HASH()` - -#### Arguments - -`` -any value or expression. - -#### Return Type - -BITMAP - -#### Remarks - -Generally, MurMurHash 32 is friendly to random, short STRING with low collision rate about one-billionth. But for longer STRING, such as your path of system, can cause more frequent collision. If you indexed your system path, you will find a lot of collisions. - -The following two values are the same. 
- -```sql -SELECT bitmap_to_string(bitmap_hash('/System/Volumes/Data/Library/Developer/CommandLineTools/SDKs/MacOSX12.3.sdk/System/Library/Frameworks/KernelManagement.framework/KernelManagement.tbd')) AS a , - bitmap_to_string(bitmap_hash('/System/Library/PrivateFrameworks/Install.framework/Versions/Current/Resources/es_419.lproj/Architectures.strings')) AS b; -``` - -Here is the result. - -```text -+-----------+-----------+ -| a | b | -+-----------+-----------+ -| 282251871 | 282251871 | -+-----------+-----------+ -``` - -### Example - -If you want to calculate MurMur3 of a certain value, you can - -``` -select bitmap_to_array(bitmap_hash('hello'))[1]; -``` - -Here is the result. - -```text -+-------------------------------------------------------------+ -| %element_extract%(bitmap_to_array(bitmap_hash('hello')), 1) | -+-------------------------------------------------------------+ -| 1321743225 | -+-------------------------------------------------------------+ -``` - -If you want to `count distinct` some columns, using bitmap has higher performance in some scenes. - -```sql -select bitmap_count(bitmap_union(bitmap_hash(`word`))) from `words`; -``` - -Here is the result. 
- -```text -+-------------------------------------------------+ -| bitmap_count(bitmap_union(bitmap_hash(`word`))) | -+-------------------------------------------------+ -| 33263478 | -+-------------------------------------------------+ -``` - -### Keywords - - BITMAP_HASH,BITMAP - -### Best Practice - -For more information, see also: -- [BITMAP_HASH64](./bitmap_hash64.md) diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-hash64.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-hash64.md deleted file mode 100644 index 0db90aaa5efcf9..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-hash64.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -{ - "title": "BITMAP_HASH64", - "language": "en" -} ---- - - - -## bitmap_hash64 -### description -#### Syntax - -`BITMAP BITMAP_HASH64(expr)` - -Compute the 64-bits hash value of a expr of any type, then return a bitmap containing that hash value. Mainly be used to load non-integer value into bitmap column, e.g., - -``` -cat data | curl --location-trusted -u user:passwd -T - -H "columns: dt,page,device_id, device_id=bitmap_hash64(device_id)" http://host:8410/api/test/testDb/_stream_load -``` - -### example - -``` -mysql> select bitmap_to_string(bitmap_hash64('hello')); -+------------------------------------------+ -| bitmap_to_string(bitmap_hash64('hello')) | -+------------------------------------------+ -| 15231136565543391023 | -+------------------------------------------+ -``` - -### keywords - - BITMAP_HASH,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-intersect.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-intersect.md deleted file mode 100644 index 8795961b480964..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-intersect.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "BITMAP_INTERSECT", - "language": "en" -} ---- - - - -## bitmap_intersect -### description - 
-Aggregation function, used to calculate the bitmap intersection after grouping. Common usage scenarios such as: calculating user retention rate. - -#### Syntax - -`BITMAP BITMAP_INTERSECT(BITMAP value)` - -Enter a set of bitmap values, find the intersection of the set of bitmap values, and return. - -### example - -Table schema - -``` -KeysType: AGG_KEY -Columns: tag varchar, date datetime, user_id bitmap bitmap_union -``` - -``` -Find the retention of users between 2020-05-18 and 2020-05-19 under different tags. -mysql> select tag, bitmap_intersect(user_id) from (select tag, date, bitmap_union(user_id) user_id from table where date in ('2020-05-18', '2020-05-19') group by tag, date) a group by tag; -``` - -Used in combination with the bitmap_to_string function to obtain the specific data of the intersection - -``` -Who are the users retained under different tags between 2020-05-18 and 2020-05-19? -mysql> select tag, bitmap_to_string(bitmap_intersect(user_id)) from (select tag, date, bitmap_union(user_id) user_id from table where date in ('2020-05-18', '2020-05-19') group by tag, date) a group by tag; -``` - -### keywords - - BITMAP_INTERSECT, BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-max.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-max.md deleted file mode 100644 index b284b541b30bcb..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-max.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "BITMAP_MAX", - "language": "en" -} ---- - - - -## bitmap_max -### description -#### Syntax - -`BIGINT BITMAP_MAX(BITMAP input)` - -Calculate and return the max values of a bitmap. 
- -### example - -``` -mysql> select bitmap_max(bitmap_from_string('')) value; -+-------+ -| value | -+-------+ -| NULL | -+-------+ - -mysql> select bitmap_max(bitmap_from_string('1,9999999999')) value; -+------------+ -| value | -+------------+ -| 9999999999 | -+------------+ -``` - -### keywords - - BITMAP_MAX,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-min.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-min.md deleted file mode 100644 index 795e6ac9b0cea0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-min.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "BITMAP_MIN", - "language": "en" -} ---- - - - -## bitmap_min -### description -#### Syntax - -`BIGINT BITMAP_MIN(BITMAP input)` - -Calculate and return the min values of a bitmap. - -### example - -``` -mysql> select bitmap_min(bitmap_from_string('')) value; -+-------+ -| value | -+-------+ -| NULL | -+-------+ - -mysql> select bitmap_min(bitmap_from_string('1,9999999999')) value; -+-------+ -| value | -+-------+ -| 1 | -+-------+ -``` - -### keywords - - BITMAP_MIN,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-not.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-not.md deleted file mode 100644 index bc6634c70cd39d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-not.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "BITMAP_NOT", - "language": "en" -} ---- - - - -## bitmap_not -### description -#### Syntax - -`BITMAP BITMAP_NOT(BITMAP lhs, BITMAP rhs)` - -Calculate the set after lhs minus rhs, return the new bitmap. 
- -### example - -``` -mysql> select bitmap_to_string(bitmap_not(bitmap_from_string('2,3'),bitmap_from_string('1,2,3,4'))); -+----------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_not(bitmap_from_string('2,3'), bitmap_from_string('1,2,3,4'))) | -+----------------------------------------------------------------------------------------+ -| | -+----------------------------------------------------------------------------------------+ - -mysql> select bitmap_to_string(bitmap_not(bitmap_from_string('2,3,5'),bitmap_from_string('1,2,3,4'))); -+------------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_not(bitmap_from_string('2,3,5'), bitmap_from_string('1,2,3,4'))) | -+------------------------------------------------------------------------------------------+ -| 5 | -+------------------------------------------------------------------------------------------+ -``` - -### keywords - - BITMAP_NOT,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-or-count.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-or-count.md deleted file mode 100644 index 2ffc0057a95570..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-or-count.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -{ - "title": "BITMAP_OR_COUNT", - "language": "en" -} ---- - - - -## bitmap_or_count -### description -#### Syntax - -`BigIntVal bitmap_or_count(BITMAP lhs, BITMAP rhs)` - -Calculates the union of two or more input bitmaps and returns the number of union sets. 
- -### example - -``` -MySQL> select bitmap_or_count(bitmap_from_string('1,2,3'),bitmap_empty()); -+--------------------------------------------------------------+ -| bitmap_or_count(bitmap_from_string('1,2,3'), bitmap_empty()) | -+--------------------------------------------------------------+ -| 3 | -+--------------------------------------------------------------+ - - -MySQL> select bitmap_or_count(bitmap_from_string('1,2,3'),bitmap_from_string('1,2,3')); -+---------------------------------------------------------------------------+ -| bitmap_or_count(bitmap_from_string('1,2,3'), bitmap_from_string('1,2,3')) | -+---------------------------------------------------------------------------+ -| 3 | -+---------------------------------------------------------------------------+ - -MySQL> select bitmap_or_count(bitmap_from_string('1,2,3'),bitmap_from_string('3,4,5')); -+---------------------------------------------------------------------------+ -| bitmap_or_count(bitmap_from_string('1,2,3'), bitmap_from_string('3,4,5')) | -+---------------------------------------------------------------------------+ -| 5 | -+---------------------------------------------------------------------------+ - -MySQL> select bitmap_or_count(bitmap_from_string('1,2,3'), bitmap_from_string('3,4,5'), to_bitmap(100), bitmap_empty()); -+-----------------------------------------------------------------------------------------------------------+ -| bitmap_or_count(bitmap_from_string('1,2,3'), bitmap_from_string('3,4,5'), to_bitmap(100), bitmap_empty()) | -+-----------------------------------------------------------------------------------------------------------+ -| 6 | -+-----------------------------------------------------------------------------------------------------------+ - -MySQL> select bitmap_or_count(bitmap_from_string('1,2,3'), bitmap_from_string('3,4,5'), to_bitmap(100), NULL); -+-------------------------------------------------------------------------------------------------+ -| 
bitmap_or_count(bitmap_from_string('1,2,3'), bitmap_from_string('3,4,5'), to_bitmap(100), NULL) | -+-------------------------------------------------------------------------------------------------+ -| NULL | -+-------------------------------------------------------------------------------------------------+ -``` - -### keywords - - BITMAP_OR_COUNT,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-or.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-or.md deleted file mode 100644 index d9f2798084edd9..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-or.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -{ - "title": "BITMAP_OR", - "language": "en" -} ---- - - - -## bitmap_or -### description -#### Syntax - -`BITMAP BITMAP_OR(BITMAP lhs, BITMAP rhs, ...)` - -Compute union of two or more input bitmaps, returns the new bitmap. - -### example - -``` -mysql> select bitmap_count(bitmap_or(to_bitmap(1), to_bitmap(1))) cnt; -+------+ -| cnt | -+------+ -| 1 | -+------+ - -mysql> select bitmap_to_string(bitmap_or(to_bitmap(1), to_bitmap(1))) ; -+---------------------------------------------------------+ -| bitmap_to_string(bitmap_or(to_bitmap(1), to_bitmap(1))) | -+---------------------------------------------------------+ -| 1 | -+---------------------------------------------------------+ - -mysql> select bitmap_count(bitmap_or(to_bitmap(1), to_bitmap(2))) cnt; -+------+ -| cnt | -+------+ -| 2 | -+------+ - -mysql> select bitmap_to_string(bitmap_or(to_bitmap(1), to_bitmap(2))); -+---------------------------------------------------------+ -| bitmap_to_string(bitmap_or(to_bitmap(1), to_bitmap(2))) | -+---------------------------------------------------------+ -| 1,2 | -+---------------------------------------------------------+ - -mysql> select bitmap_to_string(bitmap_or(to_bitmap(1), to_bitmap(2), to_bitmap(10), to_bitmap(0), NULL)); 
-+--------------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_or(to_bitmap(1), to_bitmap(2), to_bitmap(10), to_bitmap(0), NULL)) | -+--------------------------------------------------------------------------------------------+ -| 0,1,2,10 | -+--------------------------------------------------------------------------------------------+ - -mysql> select bitmap_to_string(bitmap_or(to_bitmap(1), to_bitmap(2),to_bitmap(10),to_bitmap(0),bitmap_empty())); -+------------------------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_or(to_bitmap(1), to_bitmap(2), to_bitmap(10), to_bitmap(0), bitmap_empty())) | -+------------------------------------------------------------------------------------------------------+ -| 0,1,2,10 | -+------------------------------------------------------------------------------------------------------+ - -mysql> select bitmap_to_string(bitmap_or(to_bitmap(10), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'))) ; -+--------------------------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_or(to_bitmap(10), bitmap_from_string('1,2'), bitmap_from_string('1,2,3,4,5'))) | -+--------------------------------------------------------------------------------------------------------+ -| 1,2,3,4,5,10 | -+--------------------------------------------------------------------------------------------------------+ -``` - -### keywords - - BITMAP_OR,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-remove.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-remove.md deleted file mode 100644 index 32e699c55b96fd..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-remove.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "BITMAP_REMOVE", - "language": "en" -} ---- - - - -## bitmap_remove -### description 
-#### Syntax - -`BITMAP BITMAP_REMOVE(BITMAP bitmap, BIGINT input)` - -Remove the specified value from bitmap. - -### example - -``` -mysql [(none)]>select bitmap_to_string(bitmap_remove(bitmap_from_string('1, 2, 3'), 3)) res; -+------+ -| res | -+------+ -| 1,2 | -+------+ - -mysql [(none)]>select bitmap_to_string(bitmap_remove(bitmap_from_string('1, 2, 3'), null)) res; -+------+ -| res | -+------+ -| NULL | -+------+ -``` - -### keywords - - BITMAP_REMOVE,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-subset-in-range.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-subset-in-range.md deleted file mode 100644 index b9177a221322ea..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-subset-in-range.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "BITMAP_SUBSET_IN_RANGE", - "language": "en" -} ---- - - - -## bitmap_subset_in_range - -### Description - -#### Syntax - -`BITMAP BITMAP_SUBSET_IN_RANGE(BITMAP src, BIGINT range_start, BIGINT range_end)` - -Return subset in specified range (not include the range_end). 
- -### example - -``` -mysql> select bitmap_to_string(bitmap_subset_in_range(bitmap_from_string('1,2,3,4,5'), 0, 9)) value; -+-----------+ -| value | -+-----------+ -| 1,2,3,4,5 | -+-----------+ - -mysql> select bitmap_to_string(bitmap_subset_in_range(bitmap_from_string('1,2,3,4,5'), 2, 3)) value; -+-------+ -| value | -+-------+ -| 2 | -+-------+ -``` - -### keywords - - BITMAP_SUBSET_IN_RANGE,BITMAP_SUBSET,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-subset-limit.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-subset-limit.md deleted file mode 100644 index 9426210a01002e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-subset-limit.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -{ - "title": "BITMAP_SUBSET_LIMIT", - "language": "en" -} ---- - - - -## bitmap_subset_limit - -### Description - -#### Syntax - -`BITMAP BITMAP_SUBSET_LIMIT(BITMAP src, BIGINT range_start, BIGINT cardinality_limit)` - -Create subset of the BITMAP, begin with range from range_start, limit by cardinality_limit -range_start: start value for the range -cardinality_limit: subset upper limit - -### example - -``` -mysql> select bitmap_to_string(bitmap_subset_limit(bitmap_from_string('1,2,3,4,5'), 0, 3)) value; -+-----------+ -| value | -+-----------+ -| 1,2,3 | -+-----------+ - -mysql> select bitmap_to_string(bitmap_subset_limit(bitmap_from_string('1,2,3,4,5'), 4, 3)) value; -+-------+ -| value | -+-------+ -| 4,5 | -+-------+ -``` - -### keywords - - BITMAP_SUBSET_LIMIT,BITMAP_SUBSET,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-to-array.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-to-array.md deleted file mode 100644 index edcac52dd16264..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-to-array.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -{ - "title": "BITMAP_TO_ARRAY", - "language": "en" -} ---- - - - -## bitmap_to_array - 
-### description -#### Syntax - -`ARRAY_BIGINT bitmap_to_array(BITMAP input)` - -Convert a input BITMAP to Array. -If input is null, return null. - -### example - -``` -mysql> select bitmap_to_array(null); -+------------------------+ -| bitmap_to_array(NULL) | -+------------------------+ -| NULL | -+------------------------+ - -mysql> select bitmap_to_array(bitmap_empty()); -+---------------------------------+ -| bitmap_to_array(bitmap_empty()) | -+---------------------------------+ -| [] | -+---------------------------------+ - -mysql> select bitmap_to_array(to_bitmap(1)); -+-------------------------------+ -| bitmap_to_array(to_bitmap(1)) | -+-------------------------------+ -| [1] | -+-------------------------------+ - -mysql> select bitmap_to_array(bitmap_from_string('1,2,3,4,5')); -+--------------------------------------------------+ -| bitmap_to_array(bitmap_from_string('1,2,3,4,5')) | -+--------------------------------------------------+ -| [1, 2, 3, 4, 5] | -+-------------------------------------------------- - -``` - -### keywords - - BITMAP_TO_ARRAY,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-to-base64.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-to-base64.md deleted file mode 100644 index 9d4dd298fa09d3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-to-base64.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -{ - "title": "BITMAP_TO_BASE64", - "language": "en" -} ---- - - - -## bitmap_to_base64 - -### description -#### Syntax - -`VARCHAR BITMAP_TO_BASE64(BITMAP input)` - -Convert an input BITMAP to a base64 string. If input is null, return null. Since BE config item `enable_set_in_bitmap_value` will change the format of bitmap value in memory, it also affect the result of this function. 
- -### example - -``` -mysql> select bitmap_to_base64(null); -+------------------------+ -| bitmap_to_base64(NULL) | -+------------------------+ -| NULL | -+------------------------+ - -mysql> select bitmap_to_base64(bitmap_empty()); -+----------------------------------+ -| bitmap_to_base64(bitmap_empty()) | -+----------------------------------+ -| AA== | -+----------------------------------+ - -mysql> select bitmap_to_base64(to_bitmap(1)); -+--------------------------------+ -| bitmap_to_base64(to_bitmap(1)) | -+--------------------------------+ -| AQEAAAA= | -+--------------------------------+ - -mysql> select bitmap_to_base64(bitmap_from_string("1,9999999")); -+---------------------------------------------------------+ -| bitmap_to_base64(bitmap_from_string("1,9999999")) | -+---------------------------------------------------------+ -| AjowAAACAAAAAAAAAJgAAAAYAAAAGgAAAAEAf5Y= | -+---------------------------------------------------------+ - -``` - -### keywords - - BITMAP_TO_BASE64,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-to-string.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-to-string.md deleted file mode 100644 index dd1734012c31f9..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-to-string.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -{ - "title": "BITMAP_TO_STRING", - "language": "en" -} ---- - - - -## bitmap_to_string - -### description -#### Syntax - -`VARCHAR BITMAP_TO_STRING(BITMAP input)` - -Convert a input BITMAP to a string. The string is a separated string, contains all set bits in Bitmap. -If input is null, return null. 
- -### example - -``` -mysql> select bitmap_to_string(null); -+------------------------+ -| bitmap_to_string(NULL) | -+------------------------+ -| NULL | -+------------------------+ - -mysql> select bitmap_to_string(bitmap_empty()); -+----------------------------------+ -| bitmap_to_string(bitmap_empty()) | -+----------------------------------+ -| | -+----------------------------------+ - -mysql> select bitmap_to_string(to_bitmap(1)); -+--------------------------------+ -| bitmap_to_string(to_bitmap(1)) | -+--------------------------------+ -| 1 | -+--------------------------------+ - -mysql> select bitmap_to_string(bitmap_or(to_bitmap(1), to_bitmap(2))); -+---------------------------------------------------------+ -| bitmap_to_string(bitmap_or(to_bitmap(1), to_bitmap(2))) | -+---------------------------------------------------------+ -| 1,2 | -+---------------------------------------------------------+ - -``` - -### keywords - - BITMAP_TO_STRING,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-union.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-union.md deleted file mode 100644 index ca2f66cf1b0372..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-union.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -{ - "title": "BITMAP_UNION", - "language": "en" -} ---- - - - -## bitmap_union function - -### description - -Aggregate function, used to calculate the grouped bitmap union. Common usage scenarios such as: calculating PV, UV. - -#### Syntax - -`BITMAP BITMAP_UNION(BITMAP value)` - -Enter a set of bitmap values, find the union of this set of bitmap values, and return. 
- -### example - -``` -mysql> select page_id, bitmap_union(user_id) from table group by page_id; -``` - -Combined with the bitmap_count function, the PV data of the web page can be obtained - -``` -mysql> select page_id, bitmap_count(bitmap_union(user_id)) from table group by page_id; -``` - -When the user_id field is int, the above query semantics is equivalent to - -``` -mysql> select page_id, count(distinct user_id) from table group by page_id; -``` - -### keywords - - BITMAP_UNION, BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-xor-count.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-xor-count.md deleted file mode 100644 index 3ddea715e28532..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-xor-count.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -{ - "title": "BITMAP_XOR_COUNT", - "language": "en" -} ---- - - - -## bitmap_xor_count - -### description - -#### Syntax - -`BIGINT BITMAP_XOR_COUNT(BITMAP lhs, BITMAP rhs, ...)` - -XOR two or more bitmap sets and return the size of the result set. 
- -### example - -``` -mysql> select bitmap_xor_count(bitmap_from_string('1,2,3'),bitmap_from_string('3,4,5')); -+----------------------------------------------------------------------------+ -| bitmap_xor_count(bitmap_from_string('1,2,3'), bitmap_from_string('3,4,5')) | -+----------------------------------------------------------------------------+ -| 4 | -+----------------------------------------------------------------------------+ - -mysql> select bitmap_xor_count(bitmap_from_string('1,2,3'),bitmap_from_string('1,2,3')); -+----------------------------------------------------------------------------+ -| bitmap_xor_count(bitmap_from_string('1,2,3'), bitmap_from_string('1,2,3')) | -+----------------------------------------------------------------------------+ -| 0 | -+----------------------------------------------------------------------------+ - -mysql> select bitmap_xor_count(bitmap_from_string('1,2,3'),bitmap_from_string('4,5,6')); -+----------------------------------------------------------------------------+ -| bitmap_xor_count(bitmap_from_string('1,2,3'), bitmap_from_string('4,5,6')) | -+----------------------------------------------------------------------------+ -| 6 | -+----------------------------------------------------------------------------+ - -MySQL> select (bitmap_xor_count(bitmap_from_string('2,3'),bitmap_from_string('1,2,3,4'),bitmap_from_string('3,4,5'))); -+-----------------------------------------------------------------------------------------------------------+ -| (bitmap_xor_count(bitmap_from_string('2,3'), bitmap_from_string('1,2,3,4'), bitmap_from_string('3,4,5'))) | -+-----------------------------------------------------------------------------------------------------------+ -| 3 | -+-----------------------------------------------------------------------------------------------------------+ - -MySQL> select (bitmap_xor_count(bitmap_from_string('2,3'),bitmap_from_string('1,2,3,4'),bitmap_from_string('3,4,5'),bitmap_empty())); 
-+---------------------------------------------------------------------------------------------------------------------------+ -| (bitmap_xor_count(bitmap_from_string('2,3'), bitmap_from_string('1,2,3,4'), bitmap_from_string('3,4,5'), bitmap_empty())) | -+---------------------------------------------------------------------------------------------------------------------------+ -| 3 | -+---------------------------------------------------------------------------------------------------------------------------+ - -MySQL> select (bitmap_xor_count(bitmap_from_string('2,3'),bitmap_from_string('1,2,3,4'),bitmap_from_string('3,4,5'),NULL)); -+-----------------------------------------------------------------------------------------------------------------+ -| (bitmap_xor_count(bitmap_from_string('2,3'), bitmap_from_string('1,2,3,4'), bitmap_from_string('3,4,5'), NULL)) | -+-----------------------------------------------------------------------------------------------------------------+ -| NULL | -+-----------------------------------------------------------------------------------------------------------------+ -``` - -### keywords - - BITMAP_XOR_COUNT,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-xor.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-xor.md deleted file mode 100644 index 6e82b38b4d98d6..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/bitmap-xor.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -{ - "title": "BITMAP_XOR", - "language": "en" -} ---- - - - -## bitmap_xor -### description -#### Syntax - -`BITMAP BITMAP_XOR(BITMAP lhs, BITMAP rhs, ...)` - -Compute the symmetric union of two or more input bitmaps, return the new bitmap. 
- -### example - -``` -mysql> select bitmap_count(bitmap_xor(bitmap_from_string('2,3'),bitmap_from_string('1,2,3,4'))) cnt; -+------+ -| cnt | -+------+ -| 2 | -+------+ - -mysql> select bitmap_to_string(bitmap_xor(bitmap_from_string('2,3'),bitmap_from_string('1,2,3,4'))); -+----------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_xor(bitmap_from_string('2,3'), bitmap_from_string('1,2,3,4'))) | -+----------------------------------------------------------------------------------------+ -| 1,4 | -+----------------------------------------------------------------------------------------+ - -MySQL> select bitmap_to_string(bitmap_xor(bitmap_from_string('2,3'),bitmap_from_string('1,2,3,4'),bitmap_from_string('3,4,5'))); -+---------------------------------------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_xor(bitmap_from_string('2,3'), bitmap_from_string('1,2,3,4'), bitmap_from_string('3,4,5'))) | -+---------------------------------------------------------------------------------------------------------------------+ -| 1,3,5 | -+---------------------------------------------------------------------------------------------------------------------+ - -MySQL> select bitmap_to_string(bitmap_xor(bitmap_from_string('2,3'),bitmap_from_string('1,2,3,4'),bitmap_from_string('3,4,5'),bitmap_empty())); -+-------------------------------------------------------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_xor(bitmap_from_string('2,3'), bitmap_from_string('1,2,3,4'), bitmap_from_string('3,4,5'), bitmap_empty())) | -+-------------------------------------------------------------------------------------------------------------------------------------+ -| 1,3,5 | -+-------------------------------------------------------------------------------------------------------------------------------------+ - -MySQL> 
select bitmap_to_string(bitmap_xor(bitmap_from_string('2,3'),bitmap_from_string('1,2,3,4'),bitmap_from_string('3,4,5'),NULL)); -+---------------------------------------------------------------------------------------------------------------------------+ -| bitmap_to_string(bitmap_xor(bitmap_from_string('2,3'), bitmap_from_string('1,2,3,4'), bitmap_from_string('3,4,5'), NULL)) | -+---------------------------------------------------------------------------------------------------------------------------+ -| NULL | -+---------------------------------------------------------------------------------------------------------------------------+ -``` - -### keywords - - BITMAP_XOR,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/intersect-count.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/intersect-count.md deleted file mode 100644 index eabb298d94ef71..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/intersect-count.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ -"title": "INTERSECT_COUNT", -"language": "en" -} ---- - - - -## intersect_count -### description -#### Syntax - -`BITMAP INTERSECT_COUNT(bitmap_column, column_to_filter, filter_values)` -Calculate the intersection of two or more bitmaps -Usage: intersect_count(bitmap_column_to_count, filter_column, filter_values ...) 
-Example: intersect_count(user_id, event, 'A', 'B', 'C'), meaning find the intersect count of user_id in all A/B/C 3 bitmaps - -### example - -``` -MySQL [test_query_qa]> select dt,bitmap_to_string(user_id) from pv_bitmap where dt in (3,4); -+------+-----------------------------+ -| dt | bitmap_to_string(`user_id`) | -+------+-----------------------------+ -| 4 | 1,2,3 | -| 3 | 1,2,3,4,5 | -+------+-----------------------------+ -2 rows in set (0.012 sec) - -MySQL [test_query_qa]> select intersect_count(user_id,dt,3,4) from pv_bitmap; -+----------------------------------------+ -| intersect_count(`user_id`, `dt`, 3, 4) | -+----------------------------------------+ -| 3 | -+----------------------------------------+ -1 row in set (0.014 sec) -``` - -### keywords - - INTERSECT_COUNT,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-expr-calculate-count.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-expr-calculate-count.md deleted file mode 100644 index 3562e3fcd34df5..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-expr-calculate-count.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ -"title": "ORTHOGONAL_BITMAP_EXPR_CALCULATE_COUNT", -"language": "en" -} ---- - - - -## orthogonal_bitmap_expr_calculate_count -### description -#### Syntax - -`BITMAP ORTHOGONAL_BITMAP_EXPR_CALCULATE_COUNT(bitmap_column, column_to_filter, input_string)` -Calculate the bitmap intersection, union and difference set of expressions to calculate the count function. 
The first parameter is the Bitmap column, the second parameter is the dimension column used for filtering, that is, the calculated key column, and the third parameter is the calculation expression string, meaning that the bitmap intersection, union and difference set expression is calculated according to the key column -The calculators supported by the expression:&represents intersection calculation, | represents union calculation, - represents difference calculation, ^ represents XOR calculation, and \ represents escape characters - -### example - -``` -select orthogonal_bitmap_expr_calculate_count(user_id, tag, '(833736|999777)&(1308083|231207)&(1000|20000-30000)') from user_tag_bitmap where tag in (833736,999777,130808,231207,1000,20000,30000); -Note: 1000, 20000, 30000 plastic tags represent different labels of users -``` - -``` -select orthogonal_bitmap_expr_calculate_count(user_id, tag, '(A:a/b|B:2\\-4)&(C:1-D:12)&E:23') from user_str_tag_bitmap where tag in ('A:a/b', 'B:2-4', 'C:1', 'D:12', 'E:23'); -Note: 'A:a/b', 'B:2-4', etc. 
are string types tag, representing different labels of users, where 'B:2-4' needs to be escaped as'B:2\\-4' -``` - -### keywords - - ORTHOGONAL_BITMAP_EXPR_CALCULATE_COUNT,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-expr-calculate.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-expr-calculate.md deleted file mode 100644 index e25c3611cfa0d3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-expr-calculate.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ -"title": "ORTHOGONAL_BITMAP_EXPR_CALCULATE", -"language": "en" -} ---- - - - -## orthogonal_bitmap_expr_calculate -### description -#### Syntax - -`BITMAP ORTHOGONAL_BITMAP_EXPR_CALCULATE(bitmap_column, column_to_filter, input_string)` -The first parameter is the Bitmap column, the second parameter is the dimension column used for filtering, that is, the calculated key column, and the third parameter is the calculation expression string, meaning that the bitmap intersection, union, and difference set expression is calculated according to the key column -The calculators supported by the expression:&represents intersection calculation, | represents union calculation, - represents difference calculation, ^ represents XOR calculation, and \ represents escape characters - -### example - -``` -select orthogonal_bitmap_expr_calculate(user_id, tag, '(833736|999777)&(1308083|231207)&(1000|20000-30000)') from user_tag_bitmap where tag in (833736,999777,130808,231207,1000,20000,30000); -Note: 1000, 20000, 30000 plastic tags represent different labels of users -``` - -``` -select orthogonal_bitmap_expr_calculate(user_id, tag, '(A:a/b|B:2\\-4)&(C:1-D:12)&E:23') from user_str_tag_bitmap where tag in ('A:a/b', 'B:2-4', 'C:1', 'D:12', 'E:23'); -Note: 'A:a/b', 'B:2-4', etc. 
are string types tag, representing different labels of users, where 'B:2-4' needs to be escaped as'B:2\\-4' -``` - -### keywords - - ORTHOGONAL_BITMAP_EXPR_CALCULATE,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-intersect-count.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-intersect-count.md deleted file mode 100644 index 3884e4f47151f3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-intersect-count.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ -"title": "ORTHOGONAL_BITMAP_INTERSECT_COUNT", -"language": "en" -} ---- - - - -## orthogonal_bitmap_intersect_count -### description -#### Syntax - -`BITMAP ORTHOGONAL_BITMAP_INTERSECT_COUNT(bitmap_column, column_to_filter, filter_values)` -The bitmap intersection count function, the first parameter is the bitmap column, the second parameter is the dimension column for filtering, and the third parameter is the variable length parameter, which means different values of the filter dimension column - -### example - -``` -mysql> select orthogonal_bitmap_intersect_count(members, tag_group, 1150000, 1150001, 390006) from tag_map where tag_group in ( 1150000, 1150001, 390006); -+-------------------------------------------------------------------------------------+ -| orthogonal_bitmap_intersect_count(`members`, `tag_group`, 1150000, 1150001, 390006) | -+-------------------------------------------------------------------------------------+ -| 0 | -+-------------------------------------------------------------------------------------+ -1 row in set (3.382 sec) -``` - -### keywords - - ORTHOGONAL_BITMAP_INTERSECT_COUNT,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-intersect.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-intersect.md deleted file mode 100644 index c7be4fdcf6e803..00000000000000 --- 
a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-intersect.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ -"title": "ORTHOGONAL_BITMAP_INTERSECT", -"language": "en" -} ---- - - - -## orthogonal_bitmap_intersect -### description -#### Syntax - -`BITMAP ORTHOGONAL_BITMAP_INTERSECT(bitmap_column, column_to_filter, filter_values)` -The bitmap intersection function, the first parameter is the bitmap column, the second parameter is the dimension column for filtering, and the third parameter is the variable length parameter, which means different values of the filter dimension column - -### example - -``` -mysql> select orthogonal_bitmap_intersect(members, tag_group, 1150000, 1150001, 390006) from tag_map where tag_group in ( 1150000, 1150001, 390006); -+-------------------------------------------------------------------------------+ -| orthogonal_bitmap_intersect(`members`, `tag_group`, 1150000, 1150001, 390006) | -+-------------------------------------------------------------------------------+ -| NULL | -+-------------------------------------------------------------------------------+ -1 row in set (3.505 sec) - -``` - -### keywords - - ORTHOGONAL_BITMAP_INTERSECT,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-union-count.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-union-count.md deleted file mode 100644 index 18d5fc76f1a2e9..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/orthogonal-bitmap-union-count.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ -"title": "ORTHOGONAL_BITMAP_UNION_COUNT", -"language": "en" -} ---- - - - -## orthogonal_bitmap_union_count -### description -#### Syntax - -`BITMAP ORTHOGONAL_BITMAP_UNION_COUNT(bitmap_column, column_to_filter, filter_values)` -Figure out the bitmap union count function, syntax with the original bitmap_union_count, but the implementation is different. 
- -### example - -``` -mysql> select orthogonal_bitmap_union_count(members) from tag_map where tag_group in ( 1150000, 1150001, 390006); -+------------------------------------------+ -| orthogonal_bitmap_union_count(`members`) | -+------------------------------------------+ -| 286957811 | -+------------------------------------------+ -1 row in set (2.645 sec) -``` - -### keywords - - ORTHOGONAL_BITMAP_UNION_COUNT,BITMAP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/sub-bitmap.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/sub-bitmap.md deleted file mode 100644 index 3a49ebab72dbca..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/sub-bitmap.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "SUB_BITMAP", - "language": "en" -} ---- - - - -## sub_bitmap - -### description -#### Syntax - -`BITMAP SUB_BITMAP(BITMAP src, BIGINT offset, BIGINT cardinality_limit)` - -Starting from the position specified by offset, intercept cardinality_limit bitmap elements and return a bitmap subset. 
- -### example - -``` -mysql> select bitmap_to_string(sub_bitmap(bitmap_from_string('1,0,1,2,3,1,5'), 0, 3)) value; -+-------+ -| value | -+-------+ -| 0,1,2 | -+-------+ - -mysql> select bitmap_to_string(sub_bitmap(bitmap_from_string('1,0,1,2,3,1,5'), -3, 2)) value; -+-------+ -| value | -+-------+ -| 2,3 | -+-------+ - -mysql> select bitmap_to_string(sub_bitmap(bitmap_from_string('1,0,1,2,3,1,5'), 2, 100)) value; -+-------+ -| value | -+-------+ -| 2,3,5 | -+-------+ -``` - -### keywords - - SUB_BITMAP,BITMAP_SUBSET,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/to-bitmap.md b/docs/en/docs/sql-manual/sql-functions/bitmap-functions/to-bitmap.md deleted file mode 100644 index 0ef3a2a3513cc3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitmap-functions/to-bitmap.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "TO_BITMAP", - "language": "en" -} ---- - - - -## to_bitmap -### description -#### Syntax - -`BITMAP TO_BITMAP(expr)` - -Convert an unsigned bigint (ranging from 0 to 18446744073709551615) to a bitmap containing that value. -Null will be return when the input value is not in this range. 
-Mainly be used to load integer value into bitmap column, e.g., - -``` -cat data | curl --location-trusted -u user:passwd -T - -H "columns: dt,page,user_id, user_id=to_bitmap(user_id)" http://host:8410/api/test/testDb/_stream_load -``` - -### example - -``` -mysql> select bitmap_count(to_bitmap(10)); -+-----------------------------+ -| bitmap_count(to_bitmap(10)) | -+-----------------------------+ -| 1 | -+-----------------------------+ - -MySQL> select bitmap_to_string(to_bitmap(-1)); -+---------------------------------+ -| bitmap_to_string(to_bitmap(-1)) | -+---------------------------------+ -| | -+---------------------------------+ -``` - -### keywords - - TO_BITMAP,BITMAP diff --git a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitand.md b/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitand.md deleted file mode 100644 index 2b22e80945e5bd..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitand.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ -"title": "BITAND", -"language": "en" -} ---- - - - -## bitand -### description -#### Syntax - -`BITAND(Integer-type lhs, Integer-type rhs)` - -Returns the result of the AND operation of two integers. 
- -Integer range: TINYINT、SMALLINT、INT、BIGINT、LARGEINT - -### example - -``` -mysql> select bitand(3,5) ans; -+------+ -| ans | -+------+ -| 1 | -+------+ - -mysql> select bitand(4,7) ans; -+------+ -| ans | -+------+ -| 4 | -+------+ -``` - -### keywords - - BITAND diff --git a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitcount.md b/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitcount.md deleted file mode 100644 index 2518e35f2f125f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitcount.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -{ -"title": "BIT_COUNT", -"language": "en" -} ---- - - - -## bit_count -### description -#### Syntax - -`BIT_COUNT(Integer-type x)` - -Returns the exist count of one in 2's complement represent of integer x. - -Integer-type could be: TINYINT、SMALLINT、INT、BIGINT、LARGEINT - -### example - -``` -select "0b11111111", bit_count(-1) --------------- - -+--------------+---------------+ -| '0b11111111' | bit_count(-1) | -+--------------+---------------+ -| 0b11111111 | 8 | -+--------------+---------------+ -``` - -### keywords - - BITCOUNT, BIT_COUNT diff --git a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitnot.md b/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitnot.md deleted file mode 100644 index 83464888edf2dc..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitnot.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ -"title": "BITNOT", -"language": "en" -} ---- - - - -## bitnot -### description -#### Syntax - -`BITNOT(Integer-type value)` - -Returns the result of the NOT operation of one integer. 
- -Integer range: TINYINT、SMALLINT、INT、BIGINT、LARGEINT - -### example - -``` -mysql> select bitnot(7) ans; -+------+ -| ans | -+------+ -| -8 | -+------+ - -mysql> select bitxor(-127) ans; -+------+ -| ans | -+------+ -| 126 | -+------+ -``` - -### keywords - - BITNOT diff --git a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitor.md b/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitor.md deleted file mode 100644 index 1a9357db7b9ddd..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitor.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ -"title": "BITOR", -"language": "en" -} ---- - - - -## bitor -### description -#### Syntax - -`BITOR(Integer-type lhs, Integer-type rhs)` - -Returns the result of the OR operation of two integers. - -Integer range: TINYINT、SMALLINT、INT、BIGINT、LARGEINT - -### example - -``` -mysql> select bitor(3,5) ans; -+------+ -| ans | -+------+ -| 7 | -+------+ - -mysql> select bitor(4,7) ans; -+------+ -| ans | -+------+ -| 7 | -+------+ -``` - -### keywords - - BITOR diff --git a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitshiftleft.md b/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitshiftleft.md deleted file mode 100644 index b48313c242b971..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitshiftleft.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -{ -"title": "BIT_SHIFT_LEFT", -"language": "en" -} ---- - - - -## bit_shift_left -### description -#### syntax - -`BIT_SHIFT_LEFT(BIGINT x, TINYINT c)` - -Do logical left shift to `BIGINT` type x by c bits, and return result as a `BIGINT`. -Return zero if `c` is less than 0. 
- -### example -Normal case -```sql -select 8 as x, number as c, bit_shift_left(8, number) as bit_shift_left from numbers("number"="5") --------------- - -+------+------+----------------+ -| x | c | bit_shift_left | -+------+------+----------------+ -| 8 | 0 | 8 | -| 8 | 1 | 16 | -| 8 | 2 | 32 | -| 8 | 3 | 64 | -| 8 | 4 | 128 | -+------+------+----------------+ -5 rows in set (0.04 sec) -``` -Left shift result of `9223372036854775807` which is `BIGINT_MAX` by 1 bit will get -2. -```sql -WITH tbl AS ( - SELECT 9223372036854775807 AS BIGINT_MAX -) -SELECT BIGINT_MAX, bit_shift_left(BIGINT_MAX, 1) -FROM tbl --------------- - -+---------------------+-------------------------------+ -| BIGINT_MAX | bit_shift_left(BIGINT_MAX, 1) | -+---------------------+-------------------------------+ -| 9223372036854775807 | -2 | -+---------------------+-------------------------------+ -1 row in set (0.05 sec) -``` -### keywords - - BITSHIFT, BITSHIFTLEFT diff --git a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitshiftright.md b/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitshiftright.md deleted file mode 100644 index 8d44c3421bea5f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitshiftright.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -{ -"title": "BIT_SHIFT_RIGHT", -"language": "en" -} ---- - - - -## bit_shift_right -### description -#### syntax - -`BIT_SHIFT_RIGHT(BIGINT x, TINYINT c)` - -Return result of logical right shift of `BIGINT` type x by c bits. 
- -### example -Normal case -```sql -select 1024 as x, number as c, bit_shift_right(1024, number) as bit_shift_right from numbers("number"="5") --------------- - -+------+------+-----------------+ -| x | c | bit_shift_right | -+------+------+-----------------+ -| 1024 | 0 | 1024 | -| 1024 | 1 | 512 | -| 1024 | 2 | 256 | -| 1024 | 3 | 128 | -| 1024 | 4 | 64 | -+------+------+-----------------+ -5 rows in set (0.03 sec) -``` -Logical right shift `BIGINT` -1 by 1 bits gets `BIGINT_MAX` -```sql -select bit_shift_right(-1, 1) --------------- - -+------------------------+ -| bit_shift_right(-1, 1) | -+------------------------+ -| 9223372036854775807 | -+------------------------+ -``` -Return zero if `c` is less than 0 -```sql -select bit_shift_right(100, -1) --------------- - -+--------------------------+ -| bit_shift_right(100, -1) | -+--------------------------+ -| 0 | -+--------------------------+ -1 row in set (0.04 sec) -``` - - -### keywords - - BITSHIFT, BITSHIFTRIGHT diff --git a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitxor.md b/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitxor.md deleted file mode 100644 index 689cc2c0c57216..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/bitwise-functions/bitxor.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ -"title": "BITXOR", -"language": "en" -} ---- - - - -## bitxor -### description -#### Syntax - -`BITXOR(Integer-type lhs, Integer-type rhs)` - -Returns the result of the XOR operation of two integers. 
- -Integer range: TINYINT、SMALLINT、INT、BIGINT、LARGEINT - -### example - -``` -mysql> select bitxor(3,5) ans; -+------+ -| ans | -+------+ -| 7 | -+------+ - -mysql> select bitxor(1,7) ans; -+------+ -| ans | -+------+ -| 6 | -+------+ -``` - -### keywords - - BITXOR diff --git a/docs/en/docs/sql-manual/sql-functions/cast.md b/docs/en/docs/sql-manual/sql-functions/cast.md deleted file mode 100644 index d4b8484f126399..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/cast.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -{ - "title": "CAST", - "language": "en" -} ---- - - - -## CAST -### Description - -#### Syntax - -`T cast (input as Type)` - -Converts input to the specified type - -### example - -1. Turn constant, or a column in a table - -``` -mysql> select cast (1 as BIGINT); -+-------------------+ -| CAST(1 AS BIGINT) | -+-------------------+ -| 1 | -+-------------------+ -``` - -2. Transferred raw data - -``` -curl --location-trusted -u root: -T ~/user_data/bigint -H "columns: tmp_k1, k1=cast(tmp_k1 as BIGINT)" http://host:port/api/test/bigint/_stream_load -``` - -* Note: In the import, because the original type is String, when the original data with floating point value is cast, the data will be converted to NULL, such as 12.0. Doris is currently not truncating raw data. * - -If you want to force this type of raw data cast to int. Look at the following words: - -``` -curl --location-trusted -u root: -T ~/user_data/bigint -H "columns: tmp_k1, k1=cast(cast(tmp_k1 as DOUBLE) as BIGINT)" http://host:port/api/test/bigint/_stream_load - -mysql> select cast(cast ("11.2" as double) as bigint); -+----------------------------------------+ -| CAST(CAST('11.2' AS DOUBLE) AS BIGINT) | -+----------------------------------------+ -| 11 | -+----------------------------------------+ -1 row in set (0.00 sec) - -For the DECIMALV3 type, the cast operation performs rounding half up. 
-mysql> select cast (1.115 as DECIMALV3(16, 2)); -+---------------------------------+ -| cast(1.115 as DECIMALV3(16, 2)) | -+---------------------------------+ -| 1.12 | -+---------------------------------+ -``` -### keywords -CAST diff --git a/docs/en/docs/sql-manual/sql-functions/combinators/foreach.md b/docs/en/docs/sql-manual/sql-functions/combinators/foreach.md deleted file mode 100644 index cb2396d68917d6..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/combinators/foreach.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -{ - "title": "FOREACH", - "language": "en" -} ---- - - - -## FOREACH - - - - - -### description -#### Syntax - -`AGGREGATE_FUNCTION_FOREACH(arg...)` -Converts an aggregate function for tables into an aggregate function for arrays that aggregates the corresponding array items and returns an array of results. For example, sum_foreach for the arrays [1, 2], [3, 4, 5]and[6, 7]returns the result [10, 13, 5] after adding together the corresponding array items. - - - - -### example -``` -mysql [test]>select a , s from db; -+-----------+---------------+ -| a | s | -+-----------+---------------+ -| [1, 2, 3] | ["ab", "123"] | -| [20] | ["cd"] | -| [100] | ["efg"] | -| NULL | NULL | -| [null, 2] | [null, "c"] | -+-----------+---------------+ - -mysql [test]>select sum_foreach(a) from db; -+----------------+ -| sum_foreach(a) | -+----------------+ -| [121, 4, 3] | -+----------------+ - -mysql [test]>select count_foreach(s) from db; -+------------------+ -| count_foreach(s) | -+------------------+ -| [3, 2] | -+------------------+ - -mysql [test]>select array_agg_foreach(a) from db; -+-----------------------------------+ -| array_agg_foreach(a) | -+-----------------------------------+ -| [[1, 20, 100, null], [2, 2], [3]] | -+-----------------------------------+ - -mysql [test]>select map_agg_foreach(a,a) from db; -+---------------------------------------+ -| map_agg_foreach(a, a) | -+---------------------------------------+ -| [{1:1, 20:20, 100:100}, 
{2:2}, {3:3}] | -+---------------------------------------+ -``` -### keywords -FOREACH diff --git a/docs/en/docs/sql-manual/sql-functions/combinators/merge.md b/docs/en/docs/sql-manual/sql-functions/combinators/merge.md deleted file mode 100644 index 6f9ae643b5ca41..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/combinators/merge.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "MERGE", - "language": "en" -} ---- - - - -## MERGE - - - - - -### description -#### Syntax - -`AGGREGATE_FUNCTION_MERGE(agg_state)` -The aggregated intermediate results are aggregated and calculated to obtain the actual result. -The type of the result is consistent with `AGGREGATE_FUNCTION`. - -### example -``` -mysql [test]>select avg_merge(avg_state(1)) from d_table; -+-------------------------+ -| avg_merge(avg_state(1)) | -+-------------------------+ -| 1 | -+-------------------------+ -``` -### keywords -AGG_STATE, MERGE diff --git a/docs/en/docs/sql-manual/sql-functions/combinators/state.md b/docs/en/docs/sql-manual/sql-functions/combinators/state.md deleted file mode 100644 index 6202da018eec44..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/combinators/state.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "STATE", - "language": "en" -} ---- - - - -## STATE - - - - - -### description -#### Syntax - -`AGGREGATE_FUNCTION_STATE(arg...)` -Returns the intermediate result of the aggregation function, which can be used for subsequent aggregation or to obtain the actual calculation result through the merge combiner, or can be directly written into the agg_state type table and saved. -The type of the result is agg_state, and the function signature in agg_state is `AGGREGATE_FUNCTION(arg...)`. 
- -### example -``` -mysql [test]>select avg_merge(t) from (select avg_union(avg_state(1)) as t from d_table group by k1)p; -+----------------+ -| avg_merge(`t`) | -+----------------+ -| 1 | -+----------------+ -``` -### keywords -AGG_STATE,STATE diff --git a/docs/en/docs/sql-manual/sql-functions/combinators/union.md b/docs/en/docs/sql-manual/sql-functions/combinators/union.md deleted file mode 100644 index e3c249ca1a31a8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/combinators/union.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "UNION", - "language": "en" -} ---- - - - -## UNION - - - - - -### description -#### Syntax - -`AGGREGATE_FUNCTION_UNION(agg_state)` -Aggregate multiple aggregation intermediate results into one. -The type of the result is agg_state, and the function signature is consistent with the input parameter. - -### example -``` -mysql [test]>select avg_merge(t) from (select avg_union(avg_state(1)) as t from d_table group by k1)p; -+----------------+ -| avg_merge(`t`) | -+----------------+ -| 1 | -+----------------+ -``` -### keywords -AGG_STATE, UNION diff --git a/docs/en/docs/sql-manual/sql-functions/conditional-functions/case.md b/docs/en/docs/sql-manual/sql-functions/conditional-functions/case.md deleted file mode 100644 index 4ca6a5079ac0ca..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/conditional-functions/case.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -{ - "title": "CASE", - "language": "en" -} ---- - - - -## case -### description -#### Syntax - -``` -CASE expression - WHEN condition1 THEN result1 - [WHEN condition2 THEN result2] - ... - [WHEN conditionN THEN resultN] - [ELSE result] -END -``` -OR -``` -CASE WHEN condition1 THEN result1 - [WHEN condition2 THEN result2] - ... 
- [WHEN conditionN THEN resultN] - [ELSE result] -END -``` - -Compare the expression with multiple possible values, and return the corresponding results when matching - -### example - -``` -mysql> select user_id, case user_id when 1 then 'user_id = 1' when 2 then 'user_id = 2' else 'user_id not exist' end test_case from test; -+---------+-------------+ -| user_id | test_case | -+---------+-------------+ -| 1 | user_id = 1 | -| 2 | user_id = 2 | -+---------+-------------+ - -mysql> select user_id, case when user_id = 1 then 'user_id = 1' when user_id = 2 then 'user_id = 2' else 'user_id not exist' end test_case from test; -+---------+-------------+ -| user_id | test_case | -+---------+-------------+ -| 1 | user_id = 1 | -| 2 | user_id = 2 | -+---------+-------------+ -``` -### keywords -CASE diff --git a/docs/en/docs/sql-manual/sql-functions/conditional-functions/coalesce.md b/docs/en/docs/sql-manual/sql-functions/conditional-functions/coalesce.md deleted file mode 100644 index 871c71d3b56564..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/conditional-functions/coalesce.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ - "title": "COALESCE", - "language": "en" -} ---- - - - -## coalesce -### description -#### Syntax - -`coalesce(expr1, expr2, ...., expr_n)` - - -Returns the first non empty expression in the parameter (from left to right) - -### example - -``` -mysql> select coalesce(NULL, '1111', '0000'); -+--------------------------------+ -| coalesce(NULL, '1111', '0000') | -+--------------------------------+ -| 1111 | -+--------------------------------+ -``` -### keywords -COALESCE diff --git a/docs/en/docs/sql-manual/sql-functions/conditional-functions/if.md b/docs/en/docs/sql-manual/sql-functions/conditional-functions/if.md deleted file mode 100644 index 4c23b08a58f5c6..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/conditional-functions/if.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "IF", - "language": "en" -} ---- - - - -## if -### 
description -#### Syntax - -`if(boolean condition, type valueTrue, type valueFalseOrNull)` - - -Returns valueTrue when condition is true, returns valueFalseOrNull otherwise. - -The return type is the type of the result of the valueTrue/valueFalseOrNull expression - -### example - -``` -mysql> select user_id, if(user_id = 1, "true", "false") test_if from test; -+---------+---------+ -| user_id | test_if | -+---------+---------+ -| 1 | true | -| 2 | false | -+---------+---------+ -``` -### keywords -IF diff --git a/docs/en/docs/sql-manual/sql-functions/conditional-functions/ifnull.md b/docs/en/docs/sql-manual/sql-functions/conditional-functions/ifnull.md deleted file mode 100644 index 5b5493fcd00cb1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/conditional-functions/ifnull.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "IFNULL", - "language": "en" -} ---- - - - -## ifnull -### description -#### Syntax - -`ifnull(expr1, expr2)` - - -If the value of expr1 is not null, expr1 is returned, otherwise expr2 is returned - -### example - -``` -mysql> select ifnull(1,0); -+--------------+ -| ifnull(1, 0) | -+--------------+ -| 1 | -+--------------+ - -mysql> select ifnull(null,10); -+------------------+ -| ifnull(NULL, 10) | -+------------------+ -| 10 | -+------------------+ -``` -### keywords -IFNULL diff --git a/docs/en/docs/sql-manual/sql-functions/conditional-functions/nullif.md b/docs/en/docs/sql-manual/sql-functions/conditional-functions/nullif.md deleted file mode 100644 index 07a99354678e97..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/conditional-functions/nullif.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "NULLIF", - "language": "en" -} ---- - - - -## nullif -### description -#### Syntax - -`nullif(expr1, expr2)` - - -If the two parameters are equal, null is returned. Otherwise, the value of the first parameter is returned. 
It has the same effect as the following `case when` - -``` -CASE - WHEN expr1 = expr2 THEN NULL - ELSE expr1 -END -``` - -### example - -``` -mysql> select nullif(1,1); -+--------------+ -| nullif(1, 1) | -+--------------+ -| NULL | -+--------------+ - -mysql> select nullif(1,0); -+--------------+ -| nullif(1, 0) | -+--------------+ -| 1 | -+--------------+ -``` -### keywords -NULLIF diff --git a/docs/en/docs/sql-manual/sql-functions/conditional-functions/nvl.md b/docs/en/docs/sql-manual/sql-functions/conditional-functions/nvl.md deleted file mode 100644 index 6d09ed4bd72887..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/conditional-functions/nvl.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "NVL", - "language": "en" -} ---- - - - -## nvl - - - -nvl - - - -### description -#### Syntax - -`nvl(expr1, expr2)` - - -If the value of expr1 is not null, expr1 is returned, otherwise expr2 is returned - -### example - -``` -mysql> select nvl(1,0); -+--------------+ -| nvl(1, 0) | -+--------------+ -| 1 | -+--------------+ - -mysql> select nvl(null,10); -+------------------+ -| nvl(NULL, 10) | -+------------------+ -| 10 | -+------------------+ -``` -### keywords -NVL diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/convert-tz.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/convert-tz.md deleted file mode 100644 index 826cf33f1f5077..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/convert-tz.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "CONVERT_TZ", - "language": "en" -} ---- - - - -## convert_tz -### Description -#### Syntax - -`DATETIME CONVERT_TZ(DATETIME dt, VARCHAR from_tz, VARCHAR to_tz)` - -Convert datetime value. Go from the given input time zone to the specified time zone and return the result value. If the argument is invalid, the function returns null. 
- -### Example - -``` -mysql> select convert_tz('2019-08-01 13:21:03', 'Asia/Shanghai', 'America/Los_Angeles'); -+---------------------------------------------------------------------------+ -| convert_tz('2019-08-01 13:21:03', 'Asia/Shanghai', 'America/Los_Angeles') | -+---------------------------------------------------------------------------+ -| 2019-07-31 22:21:03 | -+---------------------------------------------------------------------------+ - -mysql> select convert_tz('2019-08-01 13:21:03', '+08:00', 'America/Los_Angeles'); -+--------------------------------------------------------------------+ -| convert_tz('2019-08-01 13:21:03', '+08:00', 'America/Los_Angeles') | -+--------------------------------------------------------------------+ -| 2019-07-31 22:21:03 | -+--------------------------------------------------------------------+ -``` - -### keywords - - CONVERT_TZ diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/curdate.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/curdate.md deleted file mode 100644 index eec3b9ba55e0b5..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/curdate.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "CURDATE,CURRENT_DATE", - "language": "en" -} ---- - - - -## curdate,current_date -### Description -#### Syntax - -`DATE CURDATE()` - -Get the current date and return it in Date type - -### example - -``` -mysql> SELECT CURDATE(); -+------------+ -| CURDATE() | -+------------+ -| 2019-12-20 | -+------------+ - -mysql> SELECT CURDATE() + 0; -+---------------+ -| CURDATE() + 0 | -+---------------+ -| 20191220 | -+---------------+ -``` -### keywords - - CURDATE,CURRENT_DATE diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/current-timestamp.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/current-timestamp.md deleted file mode 100644 index 03d4adb44a5f99..00000000000000 --- 
a/docs/en/docs/sql-manual/sql-functions/date-time-functions/current-timestamp.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -{ - "title": "CURRENT_TIMESTAMP", - "language": "en" -} ---- - - - -## current_timestamp -### Description -#### Syntax - -`DATETIME CURRENT_TIMESTAMP()` - - -Get the current time and return it in Datetime type - -### example - -``` -mysql> select current_timestamp(); -+---------------------+ -| current_timestamp() | -+---------------------+ -| 2019-05-27 15:59:33 | -+---------------------+ -``` - -`DATETIMEV2 NOW(INT precision)` - - -Get the current time and return it in DatetimeV2 type. -Precision represents the second precision that the user wants. The current precision supports up to microseconds, that is, the value range of precision is [0, 6]. - -### example - -``` -mysql> select current_timestamp(3); -+-------------------------+ -| current_timestamp(3) | -+-------------------------+ -| 2022-09-06 16:18:00.922 | -+-------------------------+ -``` - -Note: -1. Currently, only DatetimeV2 type supports precision. -2. Limited by the JDK implementation, if you use jdk8 to build FE, the precision can be up to milliseconds (three decimal places), and the larger precision bits will be filled with 0. If you need higher accuracy, please use jdk11 to build FE. 
- -### keywords - CURRENT_TIMESTAMP,CURRENT,TIMESTAMP diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/curtime.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/curtime.md deleted file mode 100644 index 4f1f0943641215..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/curtime.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "CURTIME,CURRENT_TIME", - "language": "en" -} ---- - - - -## curtime,current_time -### Description -#### Syntax - -`TIME CURTIME()` - -Get the current date and return it in Time type - -### Examples - -``` -mysql> select current_time(); -+---------------------+ -| current_time() | -+---------------------+ -| 2023-08-01 17:32:24 | -+---------------------+ -``` - -### keywords - - CURTIME,CURRENT_TIME diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-add.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-add.md deleted file mode 100644 index 468ca18723c61f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-add.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "DATE_ADD", - "language": "en" -} ---- - - - -## date_add -### Description -#### Syntax - -`DATETIME DATE_ADD(DATETIME date, INTERVAL expr type)` - - -Adds a specified time interval to the date. - -The date parameter is a valid date expression. - -The expr parameter is the interval you want to add. 
- -The type parameter can be the following values: YEAR, MONTH, DAY, HOUR, MINUTE, SECOND - -### example - -``` -mysql> select date_add('2010-11-30 23:59:59', INTERVAL 2 DAY); -+-------------------------------------------------+ -| date_add('2010-11-30 23:59:59', INTERVAL 2 DAY) | -+-------------------------------------------------+ -| 2010-12-02 23:59:59 | -+-------------------------------------------------+ -``` -### keywords - DATE_ADD,DATE,ADD diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-format.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-format.md deleted file mode 100644 index 54f2acd7597656..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-format.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -{ - "title": "DATE_FORMAT", - "language": "en" -} ---- - - - -## date_format -### Description -#### Syntax - -`VARCHAR DATE_FORMAT (DATETIME DATE, VARCHAR Format)` - - -Convert the date type to a string according to the format type. -Convert the date type to a string according to the format type. Currently, it supports a maximum of 128 bytes for the string. If the return value length exceeds 128 bytes, then it returns NULL. - -The date parameter is the valid date. Format specifies the date/time output format. 
- -The formats available are: - -% a | Abbreviation for Sunday Name - -% B | Abbreviated Monthly Name - -% C | Month, numerical value - -% D | Sky in the Moon with English Prefix - -% d | Monthly day, numerical value (00-31) - -% e | Monthly day, numerical value (0-31) - -% f | microseconds - -% H | Hours (00-23) - -% h | hour (01-12) - -% I | Hours (01-12) - -% I | min, numerical value (00-59) - -% J | Days of Year (001-366) - -% k | hours (0-23) - -% L | Hours (1-12) - -% M | Moon Name - -% m | month, numerical value (00-12) - -% p | AM or PM, only available on 12-hours system - -% R | Time, 12-hour (hh:mm:ss), could be with or without AM/PM marking - -% S | seconds (00-59) - -% s | seconds (00-59) - -% T | Time, 24-hour (hh:mm:ss) - -% U | Week (00-53) Sunday is the first day of the week - -% U | Week (00-53) Monday is the first day of the week - -% V | Week (01-53) Sunday is the first day of the week, and% X is used. - -% v | Week (01-53) Monday is the first day of the week, and% x is used - -% W | Sunday - -% w | Weekly day (0 = Sunday, 6 = Saturday) - -% X | Year, where Sunday is the first day of the week, 4 places, and% V use - -% x | year, of which Monday is the first day of the week, 4 places, and% V - -% Y | Year, 4 - -% y | Year, 2 - -%% | Represent % - -Also support 3 formats: - -yyyyMMdd - -yyyy-MM-dd - -yyyy-MM-dd HH:mm:ss - -### example - -``` -mysql> select date_format('2009-10-04 22:23:00', '%W %M %Y'); -+------------------------------------------------+ -| date_format('2009-10-04 22:23:00', '%W %M %Y') | -+------------------------------------------------+ -| Sunday October 2009 | -+------------------------------------------------+ - -mysql> select date_format('2007-10-04 22:23:00', '%H:%i:%s'); -+------------------------------------------------+ -| date_format('2007-10-04 22:23:00', '%H:%i:%s') | -+------------------------------------------------+ -| 22:23:00 | -+------------------------------------------------+ - -mysql> select 
date_format('1900-10-04 22:23:00', '%D %y %a %d %m %b %j'); -+------------------------------------------------------------+ -| date_format('1900-10-04 22:23:00', '%D %y %a %d %m %b %j') | -+------------------------------------------------------------+ -| 4th 00 Thu 04 10 Oct 277 | -+------------------------------------------------------------+ - -mysql> select date_format('1997-10-04 22:23:00', '%H %k %I %r %T %S %w'); -+------------------------------------------------------------+ -| date_format('1997-10-04 22:23:00', '%H %k %I %r %T %S %w') | -+------------------------------------------------------------+ -| 22 22 10 10:23:00 PM 22:23:00 00 6 | -+------------------------------------------------------------+ - -mysql> select date_format('1999-01-01 00:00:00', '%X %V'); -+---------------------------------------------+ -| date_format('1999-01-01 00:00:00', '%X %V') | -+---------------------------------------------+ -| 1998 52 | -+---------------------------------------------+ - -mysql> select date_format('2006-06-01', '%d'); -+------------------------------------------+ -| date_format('2006-06-01 00:00:00', '%d') | -+------------------------------------------+ -| 01 | -+------------------------------------------+ - -mysql> select date_format('2006-06-01', '%%%d'); -+--------------------------------------------+ -| date_format('2006-06-01 00:00:00', '%%%d') | -+--------------------------------------------+ -| %01 | -+--------------------------------------------+ -``` -### keywords - DATE_FORMAT,DATE,FORMAT diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-sub.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-sub.md deleted file mode 100644 index c042ae45c776ec..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-sub.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "DATE_SUB", - "language": "en" -} ---- - - - -## date_sub -### Description -#### Syntax - -`DATETIME DATE_SUB(DATETIME date, 
INTERVAL expr type)` - - -Subtract the specified time interval from the date - -The date parameter is a valid date expression. - -The expr parameter is the interval you want to add. - -The type parameter can be the following values: YEAR, MONTH, DAY, HOUR, MINUTE, SECOND - -### example - -``` -mysql> select date_sub('2010-11-30 23:59:59', INTERVAL 2 DAY); -+-------------------------------------------------+ -| date_sub('2010-11-30 23:59:59', INTERVAL 2 DAY) | -+-------------------------------------------------+ -| 2010-11-28 23:59:59 | -+-------------------------------------------------+ -``` -### keywords - Date, date, date diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-trunc.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-trunc.md deleted file mode 100644 index 23a1c25393e7f2..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date-trunc.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -{ - "title": "DATE_TRUNC", - "language": "en" -} ---- - - - -## date_trunc - -date_trunc - -### Description -#### Syntax - -`DATETIME DATE_TRUNC(DATETIME datetime, VARCHAR unit)` - - -Truncates datetime in the specified time unit. - -datetime is a legal date expression. - -unit is the time unit you want to truncate. The optional values are as follows: [`second`,`minute`,`hour`,`day`,`week`,`month`,`quarter`,`year`]. 
- -### example - -``` -mysql> select date_trunc('2010-12-02 19:28:30', 'second'); -+-------------------------------------------------+ -| date_trunc('2010-12-02 19:28:30', 'second') | -+-------------------------------------------------+ -| 2010-12-02 19:28:30 | -+-------------------------------------------------+ - -mysql> select date_trunc('2010-12-02 19:28:30', 'minute'); -+-------------------------------------------------+ -| date_trunc('2010-12-02 19:28:30', 'minute') | -+-------------------------------------------------+ -| 2010-12-02 19:28:00 | -+-------------------------------------------------+ - -mysql> select date_trunc('2010-12-02 19:28:30', 'hour'); -+-------------------------------------------------+ -| date_trunc('2010-12-02 19:28:30', 'hour') | -+-------------------------------------------------+ -| 2010-12-02 19:00:00 | -+-------------------------------------------------+ - -mysql> select date_trunc('2010-12-02 19:28:30', 'day'); -+-------------------------------------------------+ -| date_trunc('2010-12-02 19:28:30', 'day') | -+-------------------------------------------------+ -| 2010-12-02 00:00:00 | -+-------------------------------------------------+ - -mysql> select date_trunc('2010-12-02 19:28:30', 'week'); -+-------------------------------------------+ -| date_trunc('2010-12-02 19:28:30', 'week') | -+-------------------------------------------+ -| 2010-11-29 00:00:00 | -+-------------------------------------------+ - -mysql> select date_trunc('2010-12-02 19:28:30', 'month'); -+-------------------------------------------------+ -| date_trunc('2010-12-02 19:28:30', 'month') | -+-------------------------------------------------+ -| 2010-12-01 00:00:00 | -+-------------------------------------------------+ - -mysql> select date_trunc('2010-12-02 19:28:30', 'quarter'); -+-------------------------------------------------+ -| date_trunc('2010-12-02 19:28:30', 'quarter') | -+-------------------------------------------------+ -| 2010-10-01 00:00:00 | 
-+-------------------------------------------------+ - -mysql> select date_trunc('2010-12-02 19:28:30', 'year'); -+-------------------------------------------------+ -| date_trunc('2010-12-02 19:28:30', 'year') | -+-------------------------------------------------+ -| 2010-01-01 00:00:00 | -+-------------------------------------------------+ -``` -### keywords - -DATE_TRUNC,DATE,DATETIME diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date_ceil.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/date_ceil.md deleted file mode 100644 index d2a988295ba7ac..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date_ceil.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -{ - "title": "date_ceil", - "language": "en" -} ---- - - - -## date_ceil -### description -#### Syntax - -`DATETIME DATE_CEIL(DATETIME datetime, INTERVAL period type)` - - -Convert the date to the nearest rounding up time of the specified time interval period. - -The datetime parameter is a valid date expression. - -The period parameter specifies how many units each cycle consists of, starting from 0001-01-01T00:00:00 - -type :YEAR, MONTH, DAY, HOUR, MINUTE, SECOND. 
- -### example - -``` -mysql [(none)]>select date_ceil("2023-07-13 22:28:18",interval 5 second); -+--------------------------------------------------------------+ -| second_ceil('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+--------------------------------------------------------------+ -| 2023-07-13 22:28:20 | -+--------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql [(none)]>select date_ceil("2023-07-13 22:28:18",interval 5 minute); -+--------------------------------------------------------------+ -| minute_ceil('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+--------------------------------------------------------------+ -| 2023-07-13 22:30:00 | -+--------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql [(none)]>select date_ceil("2023-07-13 22:28:18",interval 5 hour); -+------------------------------------------------------------+ -| hour_ceil('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+------------------------------------------------------------+ -| 2023-07-13 23:00:00 | -+------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql [(none)]>select date_ceil("2023-07-13 22:28:18",interval 5 day); -+-----------------------------------------------------------+ -| day_ceil('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+-----------------------------------------------------------+ -| 2023-07-15 00:00:00 | -+-----------------------------------------------------------+ -1 row in set (0.00 sec) - -mysql [(none)]>select date_ceil("2023-07-13 22:28:18",interval 5 month); -+-------------------------------------------------------------+ -| month_ceil('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+-------------------------------------------------------------+ -| 2023-12-01 00:00:00 | -+-------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql [(none)]>select date_ceil("2023-07-13 
22:28:18",interval 5 year); -+------------------------------------------------------------+ -| year_ceil('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+------------------------------------------------------------+ -| 2026-01-01 00:00:00 | -+------------------------------------------------------------+ -1 row in set (0.00 sec) -``` - -### keywords - - DATE_CEIL,DATE,CEIL diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date_floor.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/date_floor.md deleted file mode 100644 index cc2ac666d95e25..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/date_floor.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -{ - "title": "date_floor", - "language": "en" -} ---- - - - -## date_floor -### description -#### Syntax - -`DATETIME DATE_FLOOR(DATETIME datetime, INTERVAL period type)` - - -Converts a date to the nearest rounding down time of a specified time interval period. - -The datetime parameter is a valid date expression. - -The period parameter specifies how many units each cycle consists of, starting from 0001-01-01T00:00:00 - -type :YEAR, MONTH, DAY, HOUR, MINUTE, SECOND. 
- -### example - -``` -mysql>select date_floor("0001-01-01 00:00:16",interval 5 second); -+---------------------------------------------------------------+ -| second_floor('0001-01-01 00:00:16', 5, '0001-01-01 00:00:00') | -+---------------------------------------------------------------+ -| 0001-01-01 00:00:15 | -+---------------------------------------------------------------+ -1 row in set (0.00 sec) - -mysql>select date_floor("0001-01-01 00:00:18",interval 5 second); -+---------------------------------------------------------------+ -| second_floor('0001-01-01 00:00:18', 5, '0001-01-01 00:00:00') | -+---------------------------------------------------------------+ -| 0001-01-01 00:00:15 | -+---------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql>select date_floor("2023-07-13 22:28:18",interval 5 minute); -+---------------------------------------------------------------+ -| minute_floor('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+---------------------------------------------------------------+ -| 2023-07-13 22:25:00 | -+---------------------------------------------------------------+ -1 row in set (0.00 sec) - -mysql>select date_floor("2023-07-13 22:28:18",interval 5 hour); -+-------------------------------------------------------------+ -| hour_floor('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+-------------------------------------------------------------+ -| 2023-07-13 18:00:00 | -+-------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql>select date_floor("2023-07-13 22:28:18",interval 5 day); -+------------------------------------------------------------+ -| day_floor('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+------------------------------------------------------------+ -| 2023-07-10 00:00:00 | -+------------------------------------------------------------+ -1 row in set (0.00 sec) - -mysql>select date_floor("2023-07-13 22:28:18",interval 5 month); 
-+--------------------------------------------------------------+ -| month_floor('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+--------------------------------------------------------------+ -| 2023-07-01 00:00:00 | -+--------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql>select date_floor("2023-07-13 22:28:18",interval 5 year); -+-------------------------------------------------------------+ -| year_floor('2023-07-13 22:28:18', 5, '0001-01-01 00:00:00') | -+-------------------------------------------------------------+ -| 2021-01-01 00:00:00 | -+-------------------------------------------------------------+ - -``` - -### keywords - - DATE_FLOOR,DATE,FLOOR diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/datediff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/datediff.md deleted file mode 100644 index 8b7407e608d87c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/datediff.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -{ - "title": "DATEDIFF", - "language": "en" -} ---- - - - - -## datediff -### Description -#### Syntax - -`INT DATEDIFF (DATETIME expr1, DATETIME expr2)` - - -Calculate expr1 - expr2, the result is accurate to the unit of a day. - -Expr1 and expr2 parameters are valid date or date/time expressions. - -Note: Only the date part of the value participates in the calculation. 
- -#### example - -``` -mysql> select datediff(CAST('2007-12-31 23:59:59' AS DATETIME), CAST('2007-12-30' AS DATETIME)); -+-----------------------------------------------------------------------------------+ -| datediff(CAST('2007-12-31 23:59:59' AS DATETIME), CAST('2007-12-30' AS DATETIME)) | -+-----------------------------------------------------------------------------------+ -| 1 | -+-----------------------------------------------------------------------------------+ - -mysql> select datediff(CAST('2010-11-30 23:59:59' AS DATETIME), CAST('2010-12-31' AS DATETIME)); -+-----------------------------------------------------------------------------------+ -| datediff(CAST('2010-11-30 23:59:59' AS DATETIME), CAST('2010-12-31' AS DATETIME)) | -+-----------------------------------------------------------------------------------+ -| -31 | -+-----------------------------------------------------------------------------------+ -``` -### keywords - DATEDIFF diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/day.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/day.md deleted file mode 100644 index c4c65b305582ca..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/day.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "DAY", - "language": "en" -} ---- - - - -## day -### Description -#### Syntax - -`INT DAY(DATETIME date)` - - -Get the day information in the date, and return values range from 1 to 31. 
- -The parameter is Date or Datetime type - -### example - -``` -mysql> select day('1987-01-31'); -+----------------------------+ -| day('1987-01-31 00:00:00') | -+----------------------------+ -| 31 | -+----------------------------+ -``` -### keywords - DAY diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayname.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayname.md deleted file mode 100644 index 4aec1f5b71f6ec..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayname.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "DAYNAME", - "language": "en" -} ---- - - - -## dayname -### Description -#### Syntax - -`VARCHAR DAYNAME (DATE)` - - -Date name corresponding to return date - -The parameter is Date or Datetime type - -### example - -``` -mysql> select dayname('2007-02-03 00:00:00'); -+--------------------------------+ -| dayname('2007-02-03 00:00:00') | -+--------------------------------+ -| Saturday | -+--------------------------------+ -``` -### keywords - DAYNAME diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayofmonth.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayofmonth.md deleted file mode 100644 index ee1a3ad301c70a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayofmonth.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "DAYOFMONTH", - "language": "en" -} ---- - - - - -## dayofmonth -### Description -#### Syntax - -`INT DAYOFMONTH (DATETIME date)` - - -Get the day information in the date, and return values range from 1 to 31. 
- -The parameter is Date or Datetime type - -### example - -``` -mysql> select dayofmonth('1987-01-31'); -+-----------------------------------+ -| dayofmonth('1987-01-31 00:00:00') | -+-----------------------------------+ -| 31 | -+-----------------------------------+ -``` -### keywords - DAYOFMONTH diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayofweek.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayofweek.md deleted file mode 100644 index 545c580a381452..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayofweek.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "DAYOFWEEK", - "language": "en" -} ---- - - - -## dayofweek -### Description -#### Syntax - -`INT DAYOFWEEK (DATETIME date)` - - -The DAYOFWEEK function returns the index value of the working day of the date, that is, 1 on Sunday, 2 on Monday, and 7 on Saturday. - -The parameter is Date or Datetime type - -### example -``` -mysql> select dayofweek('2019-06-25'); -+----------------------------------+ -| dayofweek('2019-06-25 00:00:00') | -+----------------------------------+ -| 3 | -+----------------------------------+ - -mysql> select dayofweek(cast(20190625 as date)); -+-----------------------------------+ -| dayofweek(CAST(20190625 AS DATE)) | -+-----------------------------------+ -| 3 | -+-----------------------------------+ -``` -### keywords - DAYOFWEEK diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayofyear.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayofyear.md deleted file mode 100644 index 2941f4d80825aa..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/dayofyear.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "DAYOFYEAR", - "language": "en" -} ---- - - - -## dayofyear -### Description -#### Syntax - -`INT DAYOFYEAR (DATETIME date)` - - -The date of acquisition is the date of the corresponding year. 
- -The parameter is Date or Datetime type - -### example - - -``` -mysql> select dayofyear('2007-02-03 00:00:00'); -+----------------------------------+ -| dayofyear('2007-02-03 00:00:00') | -+----------------------------------+ -| 34 | -+----------------------------------+ -``` -### keywords - DAYOFYEAR diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/days-add.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/days-add.md deleted file mode 100644 index 3697b789582462..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/days-add.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "DAYS_ADD", - "language": "en" -} ---- - - - -## days_add -### description -#### Syntax - -`DATETIME DAYS_ADD(DATETIME date, INT days)` - -From date time or date plus specified days - -The parameter date can be DATETIME or DATE, and the return type is consistent with that of the parameter date. - -### example - -``` -mysql> select days_add(to_date("2020-02-02 02:02:02"), 1); -+---------------------------------------------+ -| days_add(to_date('2020-02-02 02:02:02'), 1) | -+---------------------------------------------+ -| 2020-02-03 | -+---------------------------------------------+ -``` - -### keywords - - DAYS_ADD diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/days-diff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/days-diff.md deleted file mode 100644 index 603c4b0e6743ec..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/days-diff.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "DAYS_DIFF", - "language": "en" -} ---- - - - -## days_diff -### description -#### Syntax - -`INT days_diff(DATETIME enddate, DATETIME startdate)` - -The number of days between end date and start date, the date judgment is accurate to seconds and rounded down to the nearest integer. 
Different from the date_diff function, the datediff function judges the date with precision to the day." - -### example - -``` -mysql> select days_diff('2020-12-25 22:00:00','2020-12-24 22:00:00'); -+---------------------------------------------------------+ -| days_diff('2020-12-25 22:00:00', '2020-12-24 22:00:00') | -+---------------------------------------------------------+ -| 1 | -+---------------------------------------------------------+ - -mysql> select days_diff('2020-12-25 22:00:00','2020-12-24 22:00:01'); -+---------------------------------------------------------+ -| days_diff('2020-12-24 22:00:01', '2020-12-25 22:00:00') | -+---------------------------------------------------------+ -| 0 | -+---------------------------------------------------------+ -``` - -### keywords - - days_diff diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/days-sub.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/days-sub.md deleted file mode 100644 index 6bddf9d53b7246..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/days-sub.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "DAYS_SUB", - "language": "en" -} ---- - - - -## days_sub -### description -#### Syntax - -`DATETIME DAYS_SUB(DATETIME date, INT days)` - -Subtract a specified number of days from a datetime or date - -The parameter date can be DATETIME or DATE, and the return type is consistent with that of the parameter date. 
- -### example - -``` -mysql> select days_sub("2020-02-02 02:02:02", 1); -+------------------------------------+ -| days_sub('2020-02-02 02:02:02', 1) | -+------------------------------------+ -| 2020-02-01 02:02:02 | -+------------------------------------+ -``` - -### keywords - - DAYS_SUB diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/extract.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/extract.md deleted file mode 100644 index 19e2f9a271fe9c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/extract.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "EXTRACT", - "language": "en" -} ---- - - - -## extract -### description -#### Syntax - -`INT extract(unit FROM DATETIME)` - -Extract DATETIME The value of a specified unit. The unit can be year, day, hour, minute, second or microsecond - -### Example - -``` -mysql> select extract(year from '2022-09-22 17:01:30') as year, - -> extract(month from '2022-09-22 17:01:30') as month, - -> extract(day from '2022-09-22 17:01:30') as day, - -> extract(hour from '2022-09-22 17:01:30') as hour, - -> extract(minute from '2022-09-22 17:01:30') as minute, - -> extract(second from '2022-09-22 17:01:30') as second, - -> extract(microsecond from cast('2022-09-22 17:01:30.000123' as datetimev2(6))) as microsecond; -+------+-------+------+------+--------+--------+-------------+ -| year | month | day | hour | minute | second | microsecond | -+------+-------+------+------+--------+--------+-------------+ -| 2022 | 9 | 22 | 17 | 1 | 30 | 123 | -+------+-------+------+------+--------+--------+-------------+ -``` - -### keywords - - extract diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/from-days.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/from-days.md deleted file mode 100644 index 343d337a1ab9bb..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/from-days.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ 
- "title": "FROM_DAYS", - "language": "en" -} ---- - - - -## from_days -### Description -#### Syntax - -`DATE FROM_DAYS(INT N)` - - -Calculate which day by the number of days from 0000-01-01 - -### example - -``` -mysql > select from u days (730669); -+-------------------+ -| from_days(730669) | -+-------------------+ -| 2000-07-03 | -+-------------------+ -``` - -### keywords - FROM_DAYS,FROM,DAYS diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/from-second.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/from-second.md deleted file mode 100644 index bcbf92521ad4b8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/from-second.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -{ - "title": "FROM_SECOND", - "language": "en" -} ---- - - - -## from_second -### description -#### syntax - -`DATETIME FROM_SECOND(BIGINT unix_timestamp)` -`DATETIME FROM_MILLISECOND(BIGINT unix_timestamp)` -`DATETIME FROM_MICROSECOND(BIGINT unix_timestamp)` - -Converts a timestamp to its DATETIME represent, with argument as an integer and returned as a DATETIME type. Returns `NULL` if `unix_timestamp < 0` or if the function result is greater than `9999-12-31 23:59:59.999999`. 
- -### example - -``` -mysql> set time_zone='Asia/Shanghai'; - -mysql> select from_second(-1); -+---------------------------+ -| from_second(-1) | -+---------------------------+ -| NULL | -+---------------------------+ - -mysql> select from_millisecond(12345678); -+----------------------------+ -| from_millisecond(12345678) | -+----------------------------+ -| 1970-01-01 11:25:45.678 | -+----------------------------+ - -mysql> select from_microsecond(253402271999999999); -+--------------------------------------+ -| from_microsecond(253402271999999999) | -+--------------------------------------+ -| 9999-12-31 23:59:59.999999 | -+--------------------------------------+ - -mysql> select from_microsecond(253402272000000000); -+--------------------------------------+ -| from_microsecond(253402272000000000) | -+--------------------------------------+ -| NULL | -+--------------------------------------+ -``` - -### keywords - - FROM_SECOND,FROM,SECOND,MILLISECOND,MICROSECOND diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/from-unixtime.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/from-unixtime.md deleted file mode 100644 index 90eab6a8ff8d12..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/from-unixtime.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -{ - "title": "FROM_UNIXTIME", - "language": "en" -} ---- - - - -## from_unixtime -### description -#### syntax - -`DATETIME FROM UNIXTIME (BIGINT unix timestamp [, VARCHAR string format]` - -Convert the UNIX timestamp to the corresponding time format of bits, and the format returned is specified by `string_format` - -Input is an big integer and return is a string type - -Support `date_format`'s format, and default is `%Y-%m-%d %H:%i:%s` - -Other `string_format` is illegal and will returns NULL. - -The current supported range for `unix_timestamp` is `[0, 32536771199]`. 
`unix_timestamp` values that fall outside of this range will be returned as NULL - -### example - -``` -mysql> select from_unixtime(1196440219); -+---------------------------+ -| from_unixtime(1196440219) | -+---------------------------+ -| 2007-12-01 00:30:19 | -+---------------------------+ - -mysql> select from_unixtime(1196440219, '%Y-%m-%d'); -+-----------------------------------------+ -| from_unixtime(1196440219, '%Y-%m-%d') | -+-----------------------------------------+ -| 2007-12-01 | -+-----------------------------------------+ - -mysql> select from_unixtime(1196440219, '%Y-%m-%d %H:%i:%s'); -+--------------------------------------------------+ -|From unixtime (1196440219,'%Y-%m-%d %H:%i:%s') | -+--------------------------------------------------+ -| 2007-12-01 00:30:19 | -+--------------------------------------------------+ -``` - -For timestamps that exceed the range, you can use the "from_second" function. -`DATETIME FROM_SECOND(BIGINT unix_timestamp)` -``` -mysql> select from_second(21474836470); -+--------------------------+ -| from_second(21474836470) | -+--------------------------+ -| 2650-07-06 16:21:10 | -+--------------------------+ -``` - -### keywords - - FROM_UNIXTIME,FROM,UNIXTIME diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/hour.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/hour.md deleted file mode 100644 index afe164c7cbeb52..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/hour.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "HOUR", - "language": "en" -} ---- - - - -## hour -### description -#### Syntax - -`INT HOUR(DATETIME date)` - -Returns hour information in the time type, ranging from 0,23 - -The parameter is Date or Datetime type - -### example - -``` -mysql> select hour('2018-12-31 23:59:59'); -+-----------------------------+ -| hour('2018-12-31 23:59:59') | -+-----------------------------+ -| 23 | -+-----------------------------+ -``` -### keywords - 
HOUR diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/hours-add.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/hours-add.md deleted file mode 100644 index ddd3ee704c4b1b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/hours-add.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "HOURS_ADD", - "language": "en" -} ---- - - - -## hours_add -### description -#### Syntax - -`DATETIME HOURS_ADD(DATETIME date, INT hours)` - -Add specified hours from date time or date - -The parameter date can be DATETIME or DATE, and the return type is DATETIME. - -### example - -``` -mysql> select hours_add("2020-02-02 02:02:02", 1); -+-------------------------------------+ -| hours_add('2020-02-02 02:02:02', 1) | -+-------------------------------------+ -| 2020-02-02 03:02:02 | -+-------------------------------------+ -``` - -### keywords - - HOURS_ADD diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/hours-diff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/hours-diff.md deleted file mode 100644 index f299f8d15c6d87..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/hours-diff.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "HOURS_DIFF", - "language": "en" -} ---- - - - -## hours_diff -### description -#### Syntax - -`INT hours_diff(DATETIME enddate, DATETIME startdate)` - -The difference between the start time and the end time is a few hours - -### example - -``` -mysql> select hours_diff('2020-12-25 22:00:00','2020-12-25 21:00:00'); -+----------------------------------------------------------+ -| hours_diff('2020-12-25 22:00:00', '2020-12-25 21:00:00') | -+----------------------------------------------------------+ -| 1 | -+----------------------------------------------------------+ -``` - -### keywords - - hours_diff diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/hours-sub.md 
b/docs/en/docs/sql-manual/sql-functions/date-time-functions/hours-sub.md deleted file mode 100644 index 7bb2ba1f75d0b0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/hours-sub.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "HOURS_SUB", - "language": "en" -} ---- - - - -## hours_sub -### description -#### Syntax - -`DATETIME HOURS_SUB(DATETIME date, INT hours)` - -Subtracts a specified number of hours from a datetime or date - -The parameter date can be DATETIME or DATE, and the return type is DATETIME. - -### example - -``` -mysql> select hours_sub("2020-02-02 02:02:02", 1); -+-------------------------------------+ -| hours_sub('2020-02-02 02:02:02', 1) | -+-------------------------------------+ -| 2020-02-02 01:02:02 | -+-------------------------------------+ -``` - -### keywords - - HOURS_SUB diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/last-day.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/last-day.md deleted file mode 100644 index 0eb3956f4bb843..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/last-day.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "LAST_DAY", - "language": "en" -} ---- - - - -## last_day -### Description -#### Syntax - -`DATE last_day(DATETIME date)` - -Return the last day of the month, the return day may be : -'28'(February and not a leap year), -'29'(February and a leap year), -'30'(April, June, September, November), -'31'(January, March, May, July, August, October, December) - -### example - -``` -mysql > select last_day('2000-02-03'); -+-------------------+ -| last_day('2000-02-03 00:00:00') | -+-------------------+ -| 2000-02-29 | -+-------------------+ -``` - -### keywords - LAST_DAY,DAYS diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/localtime.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/localtime.md deleted file mode 100644 index 02365476539b6f..00000000000000 --- 
a/docs/en/docs/sql-manual/sql-functions/date-time-functions/localtime.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "LOCALTIME,LOCALTIMESTAMP", - "language": "en" -} ---- - - - -## localtime,localtimestamp -### description -#### Syntax - -`DATETIME localtime()` -`DATETIME localtimestamp()` - -Get the current time and return it in Datetime type. - -### Example - -``` -mysql> select localtime(); -+---------------------+ -| localtime() | -+---------------------+ -| 2022-09-22 17:30:23 | -+---------------------+ - -mysql> select localtimestamp(); -+---------------------+ -| localtimestamp() | -+---------------------+ -| 2022-09-22 17:30:29 | -+---------------------+ -``` - -### keywords - - localtime,localtimestamp diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/makedate.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/makedate.md deleted file mode 100644 index 83b091409d9347..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/makedate.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -{ - "title": "MAKEDATE", - "language": "en" -} ---- - - - -## makedate -### Description -#### Syntax - -`DATE MAKEDATE(INT year, INT dayofyear)` - -Returns a date, given year and day-of-year values. dayofyear must be greater than 0 or the result is NULL. 
- -### example -``` -mysql> select makedate(2021,1), makedate(2021,100), makedate(2021,400); -+-------------------+---------------------+---------------------+ -| makedate(2021, 1) | makedate(2021, 100) | makedate(2021, 400) | -+-------------------+---------------------+---------------------+ -| 2021-01-01 | 2021-04-10 | 2022-02-04 | -+-------------------+---------------------+---------------------+ -``` -### keywords - MAKEDATE diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/microsecond.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/microsecond.md deleted file mode 100644 index a99ad0b4c8e33b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/microsecond.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "MICROSECOND", - "language": "en" -} ---- - - - -## microsecond -### description -#### Syntax - -`INT MICROSECOND(DATETIMEV2 date)` - -Returns microsecond information in the time type. - -The parameter is Datetime type - -### example - -``` -mysql> select microsecond(cast('1999-01-02 10:11:12.000123' as datetimev2(6))) as microsecond; -+-------------+ -| microsecond | -+-------------+ -| 123 | -+-------------+ -``` -### keywords - MICROSECOND diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/microseconds-add.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/microseconds-add.md deleted file mode 100644 index 106e015dfa70d3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/microseconds-add.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "MICROSECONDS_ADD", - "language": "en" -} ---- - - - -## microseconds_add -### description -#### Syntax - -`DATETIMEV2 microseconds_add(DATETIMEV2 basetime, INT delta)` -- basetime: Base time whose type is DATETIMEV2 -- delta: Microseconds to add to basetime -- Return type of this function is DATETIMEV2 - -### example -``` -mysql> select now(3), microseconds_add(now(3), 100000); 
-+-------------------------+----------------------------------+ -| now(3) | microseconds_add(now(3), 100000) | -+-------------------------+----------------------------------+ -| 2023-02-21 11:35:56.556 | 2023-02-21 11:35:56.656 | -+-------------------------+----------------------------------+ -``` -`now(3)` returns current time as type DATETIMEV2 with precision 3d,`microseconds_add(now(3), 100000)` means 100000 microseconds after current time - -### keywords - microseconds_add - - \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/microseconds-diff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/microseconds-diff.md deleted file mode 100644 index 7a48fc117b59c0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/microseconds-diff.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "MICROSECONDS_DIFF", - "language": "en" -} ---- - - - -## microseconds_diff -### description -#### Syntax - -`INT microseconds_diff(DATETIME enddate, DATETIME startdate)` - -How many microseconds is the difference between the start time and the end time. 
- -### example - -``` -mysql> select microseconds_diff('2020-12-25 21:00:00.623000','2020-12-25 21:00:00.123000'); -+-----------------------------------------------------------------------------------------------------------------------------+ -| microseconds_diff(cast('2020-12-25 21:00:00.623000' as DATETIMEV2(6)), cast('2020-12-25 21:00:00.123000' as DATETIMEV2(6))) | -+-----------------------------------------------------------------------------------------------------------------------------+ -| 500000 | -+-----------------------------------------------------------------------------------------------------------------------------+ -1 row in set (0.12 sec) -``` - -### keywords - - microseconds_diff diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/microseconds-sub.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/microseconds-sub.md deleted file mode 100644 index 439bc2a37429b9..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/microseconds-sub.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "MICROSECONDS_SUB", - "language": "en" -} ---- - - - -## microseconds_sub -### description -#### Syntax - -`DATETIMEV2 microseconds_sub(DATETIMEV2 basetime, INT delta)` -- basetime: Base time whose type is DATETIMEV2 -- delta: Microseconds to subtract from basetime -- Return type of this function is DATETIMEV2 - -### example -``` -mysql> select now(3), microseconds_sub(now(3), 100000); -+-------------------------+----------------------------------+ -| now(3) | microseconds_sub(now(3), 100000) | -+-------------------------+----------------------------------+ -| 2023-02-25 02:03:05.174 | 2023-02-25 02:03:05.074 | -+-------------------------+----------------------------------+ -``` -`now(3)` returns current time as type DATETIMEV2 with precision `3`,`microseconds_sub(now(3), 100000)` means 100000 microseconds before current time - -### keywords - microseconds_sub diff --git 
a/docs/en/docs/sql-manual/sql-functions/date-time-functions/milliseconds-add.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/milliseconds-add.md deleted file mode 100644 index 4d2c2f27d2221a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/milliseconds-add.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "MILLISECONDS_ADD", - "language": "en" -} ---- - - - -## milliseconds_add -### description -#### Syntax - -`DATETIMEV2 milliseconds_add(DATETIMEV2 basetime, INT delta)` -- basetime: Base time whose type is DATETIMEV2 -- delta:Milliseconds to add to basetime -- Return type of this function is DATETIMEV2 - -### example -``` -mysql> select milliseconds_add('2023-09-08 16:02:08.435123', 1); -+--------------------------------------------------------------------------+ -| milliseconds_add(cast('2023-09-08 16:02:08.435123' as DATETIMEV2(6)), 1) | -+--------------------------------------------------------------------------+ -| 2023-09-08 16:02:08.436123 | -+--------------------------------------------------------------------------+ -1 row in set (0.04 sec) -``` - - -### keywords - milliseconds_add - - \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/milliseconds-diff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/milliseconds-diff.md deleted file mode 100644 index 94f4b678e3a474..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/milliseconds-diff.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "MILLISECONDS_DIFF", - "language": "en" -} ---- - - - -## milliseconds_diff -### description -#### Syntax - -`INT milliseconds_diff(DATETIME enddate, DATETIME startdate)` - -How many milliseconds is the difference between the start time and the end time? 
- -### example - -``` -mysql> select milliseconds_diff('2020-12-25 21:00:00.623000','2020-12-25 21:00:00.123000'); -+-----------------------------------------------------------------------------------------------------------------------------+ -| milliseconds_diff(cast('2020-12-25 21:00:00.623000' as DATETIMEV2(6)), cast('2020-12-25 21:00:00.123000' as DATETIMEV2(6))) | -+-----------------------------------------------------------------------------------------------------------------------------+ -| 500 | -+-----------------------------------------------------------------------------------------------------------------------------+ -1 row in set (0.03 sec) -``` - -### keywords - - milliseconds_diff diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/milliseconds-sub.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/milliseconds-sub.md deleted file mode 100644 index 500579d5a07cb0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/milliseconds-sub.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "MILLISECONDS_SUB", - "language": "en" -} ---- - - - -## milliseconds_sub -### description -#### Syntax - -`DATETIMEV2 milliseconds_sub(DATETIMEV2 basetime, INT delta)` -- basetime: Base time whose type is DATETIMEV2 -- delta: Milliseconds to subtract from basetime -- Return type of this function is DATETIMEV2 - -### example -``` -mysql> select milliseconds_sub('2023-09-08 16:02:08.435123', 1); -+--------------------------------------------------------------------------+ -| milliseconds_sub(cast('2023-09-08 16:02:08.435123' as DATETIMEV2(6)), 1) | -+--------------------------------------------------------------------------+ -| 2023-09-08 16:02:08.434123 | -+--------------------------------------------------------------------------+ -1 row in set (0.11 sec) -``` - - -### keywords - milliseconds_sub - - \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/minute.md 
b/docs/en/docs/sql-manual/sql-functions/date-time-functions/minute.md deleted file mode 100644 index 5af9edccce40e1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/minute.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "MINUTE", - "language": "en" -} ---- - - - -## minute -### description -#### Syntax - -`INT MINUTE(DATETIME date)` - -Returns minute information in the time type, ranging from 0,59 - -The parameter is Date or Datetime type - -### example - -``` -mysql> select minute('2018-12-31 23:59:59'); -+-----------------------------+ -| minute('2018-12-31 23:59:59') | -+-----------------------------+ -| 59 | -+-----------------------------+ -``` -### keywords - MINUTE diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/minutes-add.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/minutes-add.md deleted file mode 100644 index 7c4636db8c8558..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/minutes-add.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "MINUTES_ADD", - "language": "en" -} ---- - - - -## minutes_add -### description -#### Syntax - -`DATETIME MINUTES_ADD(DATETIME date, INT minutes)` - -Add specified minutes from date time or date - -The parameter date can be DATETIME or DATE, and the return type is DATETIME. 
- -### example - -``` -mysql> select minutes_add("2020-02-02", 1); -+---------------------------------------+ -| minutes_add('2020-02-02 00:00:00', 1) | -+---------------------------------------+ -| 2020-02-02 00:01:00 | -+---------------------------------------+ -``` - -### keywords - - MINUTES_ADD diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/minutes-diff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/minutes-diff.md deleted file mode 100644 index b8c7fd1c1b880c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/minutes-diff.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "MINUTES_DIFF", - "language": "en" -} ---- - - - -## minutes_diff -### description -#### Syntax - -`INT minutes_diff(DATETIME enddate, DATETIME startdate)` - -The difference between the start time and the end time is a few minutes - -### example - -``` -mysql> select minutes_diff('2020-12-25 22:00:00','2020-12-25 21:00:00'); -+------------------------------------------------------------+ -| minutes_diff('2020-12-25 22:00:00', '2020-12-25 21:00:00') | -+------------------------------------------------------------+ -| 60 | -+------------------------------------------------------------+ -``` - -### keywords - - minutes_diff diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/minutes-sub.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/minutes-sub.md deleted file mode 100644 index 42612d96f501f4..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/minutes-sub.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "MINUTES_SUB", - "language": "en" -} ---- - - - -## minutes_sub -### description -#### Syntax - -`DATETIME MINUTES_SUB(DATETIME date, INT minutes)` - -Subtracts a specified number of minutes from a datetime or date - -The parameter date can be DATETIME or DATE, and the return type is DATETIME. 
- -### example - -``` -mysql> select minutes_sub("2020-02-02 02:02:02", 1); -+---------------------------------------+ -| minutes_sub('2020-02-02 02:02:02', 1) | -+---------------------------------------+ -| 2020-02-02 02:01:02 | -+---------------------------------------+ -``` - -### keywords - - MINUTES_SUB diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/month.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/month.md deleted file mode 100644 index 9a3a71f15be76f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/month.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "MONTH", - "language": "en" -} ---- - - - -## month -### Description -#### Syntax - -`INT MONTH (DATETIME date)` - - -Returns month information in the time type, ranging from 1,12 - -The parameter is Date or Datetime type - -### example - -``` -mysql> select month('1987-01-01'); -+-----------------------------+ -| month('1987-01-01 00:00:00') | -+-----------------------------+ -| 1 | -+-----------------------------+ -``` -### keywords - MONTH diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/monthname.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/monthname.md deleted file mode 100644 index ae1852d1aa8cca..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/monthname.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "MONTHNAME", - "language": "en" -} ---- - - - -## monthname -### Description -#### Syntax - -`VARCHAR MONTHNAME (DATE)` - - -Month name corresponding to return date - -The parameter is Date or Datetime type - -### example - -``` -mysql> select monthname('2008-02-03 00:00:00'); -+----------------------------------+ -| monthname('2008-02-03 00:00:00') | -+----------------------------------+ -| February | -+----------------------------------+ -``` -### keywords - MONTHNAME diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/months-add.md 
b/docs/en/docs/sql-manual/sql-functions/date-time-functions/months-add.md deleted file mode 100644 index 48381a10f60953..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/months-add.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "MONTHS_ADD", - "language": "en" -} ---- - - - -## months_add -### description -#### Syntax - -`DATETIME MONTHS_ADD(DATETIME date, INT months)` - -Add the specified month from the date - -The parameter date can be DATETIME or DATE, and the return type is consistent with that of the parameter date. - -### example - -``` -mysql> select months_add("2020-01-31 02:02:02", 1); -+--------------------------------------+ -| months_add('2020-01-31 02:02:02', 1) | -+--------------------------------------+ -| 2020-02-29 02:02:02 | -+--------------------------------------+ -``` - -### keywords - - MONTHS_ADD diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/months-diff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/months-diff.md deleted file mode 100644 index d39a24d0995590..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/months-diff.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "MONTHS_DIFF", - "language": "en" -} ---- - - - -## months_diff -### description -#### Syntax - -`INT months_diff(DATETIME enddate, DATETIME startdate)` - -The difference between the start time and the end time is months - -### example - -``` -mysql> select months_diff('2020-12-25','2020-10-25'); -+-----------------------------------------------------------+ -| months_diff('2020-12-25 00:00:00', '2020-10-25 00:00:00') | -+-----------------------------------------------------------+ -| 2 | -+-----------------------------------------------------------+ -``` - -### keywords - - months_diff diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/months-sub.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/months-sub.md deleted file mode 100644 
index cf0e5efa64f9b8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/months-sub.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "MONTHS_SUB", - "language": "en" -} ---- - - - -## months_sub -### description -#### Syntax - -`DATETIME MONTHS_SUB(DATETIME date, INT months)` - -Subtracts a specified number of months from a datetime or date - -The parameter date can be DATETIME or DATE, and the return type is consistent with that of the parameter date. - -### example - -``` -mysql> select months_sub("2020-02-02 02:02:02", 1); -+--------------------------------------+ -| months_sub('2020-02-02 02:02:02', 1) | -+--------------------------------------+ -| 2020-01-02 02:02:02 | -+--------------------------------------+ -``` - -### keywords - - MONTHS_SUB diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/now.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/now.md deleted file mode 100644 index a7f3cdfc12dc84..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/now.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -{ - "title": "NOW", - "language": "en" -} ---- - - - -## now -### Description -#### Syntax - -`DATETIME NOW ()` - - -Get the current time and return it in Datetime type. - -### example - -``` -mysql> select now(); -+---------------------+ -| now() | -+---------------------+ -| 2019-05-27 15:58:25 | -+---------------------+ -``` - -`DATETIMEV2 NOW(INT precision)` - - -Get the current time and return it in DatetimeV2 type. -Precision represents the second precision that the user wants. The current precision supports up to microseconds, that is, the value range of precision is [0, 6]. - -### example - -``` -mysql> select now(3); -+-------------------------+ -| now(3) | -+-------------------------+ -| 2022-09-06 16:13:30.078 | -+-------------------------+ -``` - -Note: -1. Currently, only DatetimeV2 type supports precision. -2. 
Limited by the JDK implementation, if you use jdk8 to build FE, the precision can be up to milliseconds (three decimal places), and the larger precision bits will be filled with 0. If you need higher accuracy, please use jdk11 to build FE. - -### keywords - NOW diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/quarter.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/quarter.md deleted file mode 100644 index dc021ad6d5d6ac..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/quarter.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "QUARTER", - "language": "en" -} ---- - - - -## quarter -### description -#### Syntax - -`INT quarter(DATETIME date)` - -Returns the quarter to which the specified date belongs, as an INT - -### Example - -``` -mysql> select quarter('2022-09-22 17:00:00'); -+--------------------------------+ -| quarter('2022-09-22 17:00:00') | -+--------------------------------+ -| 3 | -+--------------------------------+ -``` - -### keywords - - quarter diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/sec-to-time.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/sec-to-time.md deleted file mode 100644 index c83764cb856fb1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/sec-to-time.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "SEC_TO_TIME", - "language": "en" -} ---- - - - -## sec_to_time -### description -#### Syntax - -`TIME sec_to_time(INT timestamp)` - -The parameter is a timestamp of type INT, and the function returns a time of type TIME. 
- -### example - -``` -mysql >select sec_to_time(time_to_sec(cast('16:32:18' as time))); -+----------------------------------------------------+ -| sec_to_time(time_to_sec(CAST('16:32:18' AS TIME))) | -+----------------------------------------------------+ -| 16:32:18 | -+----------------------------------------------------+ -1 row in set (0.53 sec) -``` - -### keywords - SEC_TO_TIME diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/second.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/second.md deleted file mode 100644 index c33853685ecb74..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/second.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "SECOND", - "language": "en" -} ---- - - - -## second -### description -#### Syntax - -`INT SECOND(DATETIME date)` - -Returns second information in the time type, ranging from 0,59 - -The parameter is Date or Datetime type - -### example - -``` -mysql> select second('2018-12-31 23:59:59'); -+-----------------------------+ -| second('2018-12-31 23:59:59') | -+-----------------------------+ -| 59 | -+-----------------------------+ -``` -### keywords - SECOND diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/seconds-add.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/seconds-add.md deleted file mode 100644 index dcf5bdfb8ef59a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/seconds-add.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "SECONDS_ADD", - "language": "en" -} ---- - - - -## seconds_add -### description -#### Syntax - -`DATETIME SECONDS_ADD(DATETIME date, INT seconds)` - -ADD a specified number of seconds from a datetime or date - -The parameter date can be DATETIME or DATE, and the return type is DATETIME. 
- -### example - -``` -mysql> select seconds_add("2020-02-02 02:02:02", 1); -+---------------------------------------+ -| seconds_add('2020-02-02 02:02:02', 1) | -+---------------------------------------+ -| 2020-02-02 02:02:03 | -+---------------------------------------+ -``` - -### keywords - - SECONDS_ADD diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/seconds-diff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/seconds-diff.md deleted file mode 100644 index c8e5e5208f3f93..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/seconds-diff.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "SECONDS_DIFF", - "language": "en" -} ---- - - - -## seconds_diff -### description -#### Syntax - -`INT seconds_diff(DATETIME enddate, DATETIME startdate)` - -The difference between the start time and the end time is seconds - -### example - -``` -mysql> select seconds_diff('2020-12-25 22:00:00','2020-12-25 21:00:00'); -+------------------------------------------------------------+ -| seconds_diff('2020-12-25 22:00:00', '2020-12-25 21:00:00') | -+------------------------------------------------------------+ -| 3600 | -+------------------------------------------------------------+ -``` - -### keywords - - seconds_diff diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/seconds-sub.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/seconds-sub.md deleted file mode 100644 index 6708256636579d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/seconds-sub.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "SECONDS_SUB", - "language": "en" -} ---- - - - -## seconds_sub -### description -#### Syntax - -`DATETIME SECONDS_SUB(DATETIME date, INT seconds)` - -Subtracts a specified number of seconds from a datetime or date - -The parameter date can be DATETIME or DATE, and the return type is DATETIME. 
- -### example - -``` -mysql> select seconds_sub("2020-01-01 00:00:00", 1); -+---------------------------------------+ -| seconds_sub('2020-01-01 00:00:00', 1) | -+---------------------------------------+ -| 2019-12-31 23:59:59 | -+---------------------------------------+ -``` - -### keywords - - SECONDS_SUB diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/str-to-date.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/str-to-date.md deleted file mode 100644 index 4b46dc25543600..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/str-to-date.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -{ - "title": "STR_TO_DATE", - "language": "en" -} ---- - - - -## Str_to_date -### Description -#### Syntax - -`DATETIME STR TWO DATES (VARCHAR STR, VARCHAR format)` - - -Convert STR to DATE type by format specified, if the conversion result does not return NULL. Note that the 'format' parameter specifies the format of the first parameter. - -All formats in [date_format](./date-format) are supported. In addition, support auto completing the remainder of date part for '%Y' and '%Y-%m'. 
- -### example - -``` -mysql> select str_to_date('2014-12-21 12:34:56', '%Y-%m-%d %H:%i:%s'); -+---------------------------------------------------------+ -| str_to_date('2014-12-21 12:34:56', '%Y-%m-%d %H:%i:%s') | -+---------------------------------------------------------+ -| 2014-12-21 12:34:56 | -+---------------------------------------------------------+ - -mysql> select str_to_date('2014-12-21 12:34%3A56', '%Y-%m-%d %H:%i%%3A%s'); -+--------------------------------------------------------------+ -| str_to_date('2014-12-21 12:34%3A56', '%Y-%m-%d %H:%i%%3A%s') | -+--------------------------------------------------------------+ -| 2014-12-21 12:34:56 | -+--------------------------------------------------------------+ - -mysql> select str_to_date('200442 Monday', '%X%V %W'); -+-----------------------------------------+ -| str_to_date('200442 Monday', '%X%V %W') | -+-----------------------------------------+ -| 2004-10-18 | -+-----------------------------------------+ - -mysql> select str_to_date("2020-09-01", "%Y-%m-%d %H:%i:%s"); -+------------------------------------------------+ -| str_to_date('2020-09-01', '%Y-%m-%d %H:%i:%s') | -+------------------------------------------------+ -| 2020-09-01 00:00:00 | -+------------------------------------------------+ - -mysql> select str_to_date('2023','%Y'); -+---------------------------+ -| str_to_date('2023', '%Y') | -+---------------------------+ -| 2023-01-01 | -+---------------------------+ -``` -### keywords - - STR_TO_DATE,STR,TO,DATE diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/time-round.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/time-round.md deleted file mode 100644 index d4bbc408b07ae0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/time-round.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -{ - "title": "TIME_ROUND", - "language": "en" -} ---- - - - -## time_round -### description -#### Syntax - -```sql -DATETIME TIME_ROUND(DATETIME expr) 
-DATETIME TIME_ROUND(DATETIME expr, INT period) -DATETIME TIME_ROUND(DATETIME expr, DATETIME origin) -DATETIME TIME_ROUND(DATETIME expr, INT period, DATETIME origin) -``` - -The function name `TIME_ROUND` consists of two parts, Each part consists of the following optional values. -- `TIME`: `SECOND`, `MINUTE`, `HOUR`, `DAY`, `WEEK`, `MONTH`, `YEAR` -- `ROUND`: `FLOOR`, `CEIL` - -Returns the upper/lower bound of `expr`. - -- `period` specifies how many `TIME` units, the default is `1`. -- `origin` specifies the start time of the period, the default is `1970-01-01T00:00:00`, the start time of `WEEK` is Sunday, which is `1970-01-04T00:00:00`. Could be larger than `expr`. -- Please try to choose common `period`, such as 3 `MONTH`, 90 `MINUTE`. If you set a uncommon `period`, please also specify `origin`. - -### example - -``` - -MySQL> SELECT YEAR_FLOOR('20200202000000'); -+------------------------------+ -| year_floor('20200202000000') | -+------------------------------+ -| 2020-01-01 00:00:00 | -+------------------------------+ - - -MySQL> SELECT MONTH_CEIL(CAST('2020-02-02 13:09:20' AS DATETIME), 3); --quarter -+--------------------------------------------------------+ -| month_ceil(CAST('2020-02-02 13:09:20' AS DATETIME), 3) | -+--------------------------------------------------------+ -| 2020-04-01 00:00:00 | -+--------------------------------------------------------+ - - -MySQL> SELECT WEEK_CEIL('2020-02-02 13:09:20', '2020-01-06'); --monday -+---------------------------------------------------------+ -| week_ceil('2020-02-02 13:09:20', '2020-01-06 00:00:00') | -+---------------------------------------------------------+ -| 2020-02-03 00:00:00 | -+---------------------------------------------------------+ - - -MySQL> SELECT MONTH_CEIL(CAST('2020-02-02 13:09:20' AS DATETIME), 3, CAST('1970-01-09 00:00:00' AS DATETIME)); --next rent day -+-------------------------------------------------------------------------------------------------+ -| 
month_ceil(CAST('2020-02-02 13:09:20' AS DATETIME), 3, CAST('1970-01-09 00:00:00' AS DATETIME)) | -+-------------------------------------------------------------------------------------------------+ -| 2020-04-09 00:00:00 | -+-------------------------------------------------------------------------------------------------+ - -``` -### keywords - TIME_ROUND diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/time-to-sec.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/time-to-sec.md deleted file mode 100644 index f0ac58826eca88..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/time-to-sec.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "TIME_TO_SEC", - "language": "en" -} ---- - - - -## time_to_sec -### description -#### Syntax - -`INT time_to_sec(TIME datetime)` - -input parameter is the time type -Convert the specified time value to seconds, returned result is: hours × 3600+ minutes×60 + seconds. - -### example - -``` -mysql >select current_time(),time_to_sec(current_time()); -+----------------+-----------------------------+ -| current_time() | time_to_sec(current_time()) | -+----------------+-----------------------------+ -| 16:32:18 | 59538 | -+----------------+-----------------------------+ -1 row in set (0.01 sec) -``` -### keywords - TIME_TO_SEC diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/timediff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/timediff.md deleted file mode 100644 index 505b2197b4eede..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/timediff.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -{ - "title": "TIMEDIFF", - "language": "en" -} ---- - - - -## timediff -### Description -#### Syntax - -`TIME TIMEDIFF (DATETIME expr1, DATETIME expr2)` - - -TIMEDIFF returns the difference between two DATETIMEs - -The TIMEDIFF function returns the result of expr1 - expr2 expressed as a time value, with a return value of TIME 
type -Due to the valid range of TIME type being '-838:59:59' to '838:59:59', -So when the return value of the calculation result is less than the left boundary or greater than the right boundary, the corresponding boundary value will be taken. - -#### example - -``` -mysql> SELECT TIMEDIFF(now(),utc_timestamp()); -+----------------------------------+ -| timediff(now(), utc_timestamp()) | -+----------------------------------+ -| 08:00:00 | -+----------------------------------+ - -mysql> SELECT TIMEDIFF('2019-07-11 16:59:30','2019-07-11 16:59:21'); -+--------------------------------------------------------+ -| timediff('2019-07-11 16:59:30', '2019-07-11 16:59:21') | -+--------------------------------------------------------+ -| 00:00:09 | -+--------------------------------------------------------+ - -mysql> SELECT TIMEDIFF('2019-01-01 00:00:00', NULL); -+---------------------------------------+ -| timediff('2019-01-01 00:00:00', NULL) | -+---------------------------------------+ -| NULL | -+---------------------------------------+ - -mysql >SELECT timediff('2020-02-02 15:30:00', '1951-02-16 15:27:00') as res; -+-----------+ -| res | -+-----------+ -| 838:59:59 | -+-----------+ -``` -### keywords - TIMEDIFF diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/timestampadd.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/timestampadd.md deleted file mode 100644 index 2d97cb08247768..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/timestampadd.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -{ - "title": "TIMESTAMPADD", - "language": "en" -} ---- - - - -## timestampadd -### description -#### Syntax - -`DATETIME TIMESTAMPADD(unit, interval, DATETIME datetime_expr)` - -Adds the integer expression interval to the date or datetime expression datetime_expr. - -The unit for interval is given by the unit argument, which should be one of the following values: - -SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, or YEAR. 
- -### example - -``` - -mysql> SELECT TIMESTAMPADD(MINUTE,1,'2019-01-02'); -+------------------------------------------------+ -| timestampadd(MINUTE, 1, '2019-01-02 00:00:00') | -+------------------------------------------------+ -| 2019-01-02 00:01:00 | -+------------------------------------------------+ - -mysql> SELECT TIMESTAMPADD(WEEK,1,'2019-01-02'); -+----------------------------------------------+ -| timestampadd(WEEK, 1, '2019-01-02 00:00:00') | -+----------------------------------------------+ -| 2019-01-09 00:00:00 | -+----------------------------------------------+ -``` -### keywords - TIMESTAMPADD diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/timestampdiff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/timestampdiff.md deleted file mode 100644 index dd5769852d555d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/timestampdiff.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -{ - "title": "TIMESTAMPDIFF", - "language": "en" -} ---- - - - -## timestampdiff -### description -#### Syntax - -`INT TIMESTAMPDIFF(unit, DATETIME datetime_expr1, DATETIME datetime_expr2)` - -Returns datetime_expr2 − datetime_expr1, where datetime_expr1 and datetime_expr2 are date or datetime expressions. - -The unit for the result (an integer) is given by the unit argument. - -The legal values for unit are the same as those listed in the description of the TIMESTAMPADD() function. 
- -### example - -``` - -MySQL> SELECT TIMESTAMPDIFF(MONTH,'2003-02-01','2003-05-01'); -+--------------------------------------------------------------------+ -| timestampdiff(MONTH, '2003-02-01 00:00:00', '2003-05-01 00:00:00') | -+--------------------------------------------------------------------+ -| 3 | -+--------------------------------------------------------------------+ - -MySQL> SELECT TIMESTAMPDIFF(YEAR,'2002-05-01','2001-01-01'); -+-------------------------------------------------------------------+ -| timestampdiff(YEAR, '2002-05-01 00:00:00', '2001-01-01 00:00:00') | -+-------------------------------------------------------------------+ -| -1 | -+-------------------------------------------------------------------+ - - -MySQL> SELECT TIMESTAMPDIFF(MINUTE,'2003-02-01','2003-05-01 12:05:55'); -+---------------------------------------------------------------------+ -| timestampdiff(MINUTE, '2003-02-01 00:00:00', '2003-05-01 12:05:55') | -+---------------------------------------------------------------------+ -| 128885 | -+---------------------------------------------------------------------+ - -``` -### keywords - TIMESTAMPDIFF diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/to-date.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/to-date.md deleted file mode 100644 index b459a44bea6fc7..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/to-date.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "TO_DATE", - "language": "en" -} ---- - - - -## to_date -### description -#### Syntax - -`DATE TO_DATE(DATETIME)` - -Return the DATE part of DATETIME value. 
- -### example - -``` -mysql> select to_date("2020-02-02 00:00:00"); -+--------------------------------+ -| to_date('2020-02-02 00:00:00') | -+--------------------------------+ -| 2020-02-02 | -+--------------------------------+ -``` - -### keywords - - TO_DATE diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/to-days.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/to-days.md deleted file mode 100644 index db0bd9c9179d12..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/to-days.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "TO_DAYS", - "language": "en" -} ---- - - - -## to_days -### Description -#### Syntax - -`INT TO DAYS` - - -Days of returning date distance 0000-01-01 - -The parameter is Date or Datetime type - -### example - -``` -mysql> select to_days('2007-10-07'); -+-----------------------+ -| to_days('2007-10-07') | -+-----------------------+ -| 733321 | -+-----------------------+ -``` - -### keywords - TO_DAYS,TO,DAYS diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/to-monday.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/to-monday.md deleted file mode 100644 index ffc8daa3891af2..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/to-monday.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ - "title": "TO_MONDAY", - "language": "en" -} ---- - - - -## to_monday -### Description -#### Syntax - -`DATE to_monday(DATETIME date)` - -Round a date or datetime down to the nearest Monday, return type is Date or DateV2. 
-Specially, input 1970-01-01, 1970-01-02, 1970-01-03 and 1970-01-04 will return '1970-01-01' - -### example - -``` -MySQL [(none)]> select to_monday('2022-09-10'); -+----------------------------------+ -| to_monday('2022-09-10 00:00:00') | -+----------------------------------+ -| 2022-09-05 | -+----------------------------------+ -``` - -### keywords - MONDAY diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/unix-timestamp.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/unix-timestamp.md deleted file mode 100644 index 9995b3750f9fe0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/unix-timestamp.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -{ - "title": "UNIX_TIMESTAMP", - "language": "en" -} ---- - - - -## unix_timestamp -### Description -#### Syntax - -`INT UNIX_TIMESTAMP([DATETIME date[, STRING fmt]])` - -Converting a Date or Datetime type to a UNIX timestamp. - -If there are no parameters, the current time is converted into a timestamp. - -The parameter needs to be Date or Datetime type. - -Any date before 1970-01-01 00:00:00 or after 2038-01-19 03:14:07 will return 0. - -See `date_format` function to get Format explanation. - -This function is affected by time zone. 
- -### example - -``` -mysql> select unix_timestamp(); -+------------------+ -| unix_timestamp() | -+------------------+ -| 1558589570 | -+------------------+ - -mysql> select unix_timestamp('2007-11-30 10:30:19'); -+---------------------------------------+ -| unix_timestamp('2007-11-30 10:30:19') | -+---------------------------------------+ -| 1196389819 | -+---------------------------------------+ - -mysql> select unix_timestamp('2007-11-30 10:30-19', '%Y-%m-%d %H:%i-%s'); -+---------------------------------------+ -| unix_timestamp('2007-11-30 10:30-19') | -+---------------------------------------+ -| 1196389819 | -+---------------------------------------+ - -mysql> select unix_timestamp('2007-11-30 10:30%3A19', '%Y-%m-%d %H:%i%%3A%s'); -+---------------------------------------+ -| unix_timestamp('2007-11-30 10:30%3A19') | -+---------------------------------------+ -| 1196389819 | -+---------------------------------------+ - -mysql> select unix_timestamp('1969-01-01 00:00:00'); -+---------------------------------------+ -| unix_timestamp('1969-01-01 00:00:00') | -+---------------------------------------+ -| 0 | -+---------------------------------------+ -``` - -### keywords - - UNIX_TIMESTAMP,UNIX,TIMESTAMP diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/utc-timestamp.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/utc-timestamp.md deleted file mode 100644 index c35fc5132b3aa8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/utc-timestamp.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "UTC_TIMESTAMP", - "language": "en" -} ---- - - - -## utc_timestamp -### Description -#### Syntax - -`DATETIME UTC_TIMESTAMP()` - - -Returns the current UTC date and time in "YYYY-MM-DD HH: MM: SS" or - -A Value of "YYYYMMDDHMMSS" Format - -Depending on whether the function is used in a string or numeric context - -### example - -``` -mysql> select utc_timestamp(),utc_timestamp() + 1; 
-+---------------------+---------------------+ -| utc_timestamp() | utc_timestamp() + 1 | -+---------------------+---------------------+ -| 2019-07-10 12:31:18 | 20190710123119 | -+---------------------+---------------------+ -``` -### keywords - UTC_TIMESTAMP,UTC,TIMESTAMP diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/week.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/week.md deleted file mode 100644 index 5832767fcd2e67..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/week.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -{ - "title": "WEEK", - "language": "en" -} ---- - - - -## week -### Description -#### Syntax - -`INT WEEK(DATE date[, INT mode])` - -Returns the week number for date.The value of the mode argument defaults to 0. -The following table describes how the mode argument works. - -|Mode |First day of week |Range |Week 1 is the first week … | -|:----|:-----------------|:------|:-----------------------------| -|0 |Sunday |0-53 |with a Sunday in this year | -|1 |Monday |0-53 |with 4 or more days this year | -|2 |Sunday |1-53 |with a Sunday in this year | -|3 |Monday |1-53 |with 4 or more days this year | -|4 |Sunday |0-53 |with 4 or more days this year | -|5 |Monday |0-53 |with a Monday in this year | -|6 |Sunday |1-53 |with 4 or more days this year | -|7 |Monday |1-53 |with a Monday in this year | - -The parameter is Date or Datetime type - -### example -``` -mysql> select week('2020-1-1'); -+------------------+ -| week('2020-1-1') | -+------------------+ -| 0 | -+------------------+ -``` -``` -mysql> select week('2020-7-1',1); -+---------------------+ -| week('2020-7-1', 1) | -+---------------------+ -| 27 | -+---------------------+ -``` -### keywords - WEEK diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/weekday.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/weekday.md deleted file mode 100644 index a0b578f3b5f454..00000000000000 --- 
a/docs/en/docs/sql-manual/sql-functions/date-time-functions/weekday.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "WEEKDAY", - "language": "en" -} ---- - - - -## weekday -### Description -#### Syntax - -`INT WEEKDAY (DATETIME date)` - - -The WEEKDAY function returns the index value of the working day of the date, that is, 0 on Monday, 1 on Tuesday, and 6 on Sunday. - -The parameter is Date or Datetime type - -Notice the difference between WEEKDAY and DAYOFWEEK: -``` - +-----+-----+-----+-----+-----+-----+-----+ - | Sun | Mon | Tues| Wed | Thur| Fri | Sat | - +-----+-----+-----+-----+-----+-----+-----+ - weekday | 6 | 0 | 1 | 2 | 3 | 4 | 5 | - +-----+-----+-----+-----+-----+-----+-----+ -dayofweek | 1 | 2 | 3 | 4 | 5 | 6 | 7 | - +-----+-----+-----+-----+-----+-----+-----+ -``` - -### example -``` -mysql> select weekday('2019-06-25'); -+--------------------------------+ -| weekday('2019-06-25 00:00:00') | -+--------------------------------+ -| 1 | -+--------------------------------+ - -mysql> select weekday(cast(20190625 as date)); -+---------------------------------+ -| weekday(CAST(20190625 AS DATE)) | -+---------------------------------+ -| 1 | -+---------------------------------+ -``` -### keywords - WEEKDAY diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/weekofyear.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/weekofyear.md deleted file mode 100644 index 4eed93b6b1d254..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/weekofyear.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "WEEKOFYEAR", - "language": "en" -} ---- - - - -## weekofyear -### Description -#### Syntax - -`INT WEEKOFYEAR (DATETIME DATE)` - - - -Get the Weeks of the Year - -The parameter is Date or Datetime type - -### example - -``` -mysql> select weekofyear('2008-02-20 00:00:00'); -+-----------------------------------+ -| weekofyear('2008-02-20 00:00:00') | -+-----------------------------------+ -| 8 | 
-+-----------------------------------+ -``` -### keywords - WEEKOFYEAR diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/weeks-add.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/weeks-add.md deleted file mode 100644 index bee4e5fcc080ec..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/weeks-add.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "WEEKS_ADD", - "language": "en" -} ---- - - - -## weeks_add -### description -#### Syntax - -`DATETIME WEEKS_ADD(DATETIME date, INT weeks)` - -ADD a specified number of weeks from a datetime or date - -The parameter date can be DATETIME or DATE, and the return type is consistent with that of the parameter date. - -### example - -``` -mysql> select weeks_add("2020-02-02 02:02:02", 1); -+-------------------------------------+ -| weeks_add('2020-02-02 02:02:02', 1) | -+-------------------------------------+ -| 2020-02-09 02:02:02 | -+-------------------------------------+ -``` - -### keywords - - WEEKS_ADD diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/weeks-diff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/weeks-diff.md deleted file mode 100644 index e3579a9195453f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/weeks-diff.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "WEEKS_DIFF", - "language": "en" -} ---- - - - -## weeks_diff -### description -#### Syntax - -`INT weeks_diff(DATETIME enddate, DATETIME startdate)` - -The difference between the start time and the end time is weeks - -### example - -``` -mysql> select weeks_diff('2020-12-25','2020-10-25'); -+----------------------------------------------------------+ -| weeks_diff('2020-12-25 00:00:00', '2020-10-25 00:00:00') | -+----------------------------------------------------------+ -| 8 | -+----------------------------------------------------------+ -``` - -### keywords - - weeks_diff diff --git 
a/docs/en/docs/sql-manual/sql-functions/date-time-functions/weeks-sub.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/weeks-sub.md deleted file mode 100644 index 3cb574fd0c5565..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/weeks-sub.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "WEEKS_SUB", - "language": "en" -} ---- - - - -## weeks_sub -### description -#### Syntax - -`DATETIME WEEKS_SUB(DATETIME date, INT weeks)` - -Subtracts a specified number of weeks from a datetime or date - -The parameter date can be DATETIME or DATE, and the return type is consistent with that of the parameter date. - -### example - -``` -mysql> select weeks_sub("2020-02-02 02:02:02", 1); -+-------------------------------------+ -| weeks_sub('2020-02-02 02:02:02', 1) | -+-------------------------------------+ -| 2020-01-26 02:02:02 | -+-------------------------------------+ -``` - -### keywords - - WEEKS_SUB diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/year.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/year.md deleted file mode 100644 index 2665ed5a8a4681..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/year.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "YEAR", - "language": "en" -} ---- - - - - -## year -### Description -#### Syntax - -`INT YEAR(DATETIME date)` - - -Returns the year part of the date type, ranging from 1000 to 9999 - -The parameter is Date or Datetime type - -### example - -``` -mysql> select year('1987-01-01'); -+-----------------------------+ -| year('1987-01-01 00:00:00') | -+-----------------------------+ -| 1987 | -+-----------------------------+ -``` -### keywords - YEAR diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/years-add.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/years-add.md deleted file mode 100644 index 7b9a7cc5b91032..00000000000000 --- 
a/docs/en/docs/sql-manual/sql-functions/date-time-functions/years-add.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "YEARS_ADD", - "language": "en" -} ---- - - - -## years_add -### description -#### Syntax - -`DATETIME YEARS_ADD(DATETIME date, INT years)` - -ADD a specified number of years from a datetime or date - -The parameter date can be DATETIME or DATE, and the return type is consistent with that of the parameter date. - -### example - -``` -mysql> select years_add("2020-01-31 02:02:02", 1); -+-------------------------------------+ -| years_add('2020-01-31 02:02:02', 1) | -+-------------------------------------+ -| 2021-01-31 02:02:02 | -+-------------------------------------+ -``` - -### keywords - - YEARS_ADD diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/years-diff.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/years-diff.md deleted file mode 100644 index bb787b640964c2..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/years-diff.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "YEARS_DIFF", - "language": "en" -} ---- - - - -## years_diff -### description -#### Syntax - -`INT years_diff(DATETIME enddate, DATETIME startdate)` - -The difference between the start time and the end time is several years - -### example - -``` -mysql> select years_diff('2020-12-25','2019-10-25'); -+----------------------------------------------------------+ -| years_diff('2020-12-25 00:00:00', '2019-10-25 00:00:00') | -+----------------------------------------------------------+ -| 1 | -+----------------------------------------------------------+ -``` - -### keywords - - years_diff diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/years-sub.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/years-sub.md deleted file mode 100644 index 7cd68cf4a7aa16..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/years-sub.md +++ /dev/null @@ -1,50 +0,0 
@@ ---- -{ - "title": "YEARS_SUB", - "language": "en" -} ---- - - - -## years_sub -### description -#### Syntax - -`DATETIME YEARS_SUB(DATETIME date, INT years)` - -Subtracts a specified number of years from a datetime or date - -The parameter date can be DATETIME or DATE, and the return type is consistent with that of the parameter date. - -### example - -``` -mysql> select years_sub("2020-02-02 02:02:02", 1); -+-------------------------------------+ -| years_sub('2020-02-02 02:02:02', 1) | -+-------------------------------------+ -| 2019-02-02 02:02:02 | -+-------------------------------------+ -``` - -### keywords - - YEARS_SUB diff --git a/docs/en/docs/sql-manual/sql-functions/date-time-functions/yearweek.md b/docs/en/docs/sql-manual/sql-functions/date-time-functions/yearweek.md deleted file mode 100644 index 65a9e935187942..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/date-time-functions/yearweek.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -{ - "title": "YEARWEEK", - "language": "en" -} ---- - - - -## yearweek -### Description -#### Syntax - -`INT YEARWEEK(DATE date[, INT mode])` - -Returns year and week for a date.The value of the mode argument defaults to 0. -When the week of the date belongs to the previous year, the year and week of the previous year are returned; -when the week of the date belongs to the next year, the year of the next year is returned and the week is 1. - -The following table describes how the mode argument works. 
- -|Mode |First day of week |Range |Week 1 is the first week … | -|:----|:-----------------|:-------|:-----------------------------| -|0 |Sunday |1-53 |with a Sunday in this year | -|1 |Monday |1-53 |with 4 or more days this year | -|2 |Sunday |1-53 |with a Sunday in this year | -|3 |Monday |1-53 |with 4 or more days this year | -|4 |Sunday |1-53 |with 4 or more days this year | -|5 |Monday |1-53 |with a Monday in this year | -|6 |Sunday |1-53 |with 4 or more days this year | -|7 |Monday |1-53 |with a Monday in this year | - -The parameter is Date or Datetime type - -### example -``` -mysql> select yearweek('2021-1-1'); -+----------------------+ -| yearweek('2021-1-1') | -+----------------------+ -| 202052 | -+----------------------+ -``` -``` -mysql> select yearweek('2020-7-1'); -+----------------------+ -| yearweek('2020-7-1') | -+----------------------+ -| 202026 | -+----------------------+ -``` -``` -mysql> select yearweek('2024-12-30',1); -+------------------------------------+ -| yearweek('2024-12-30 00:00:00', 1) | -+------------------------------------+ -| 202501 | -+------------------------------------+ -``` - -### keywords - YEARWEEK diff --git a/docs/en/docs/sql-manual/sql-functions/digital-masking.md b/docs/en/docs/sql-manual/sql-functions/digital-masking.md deleted file mode 100644 index 9ff7c30b3e621f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/digital-masking.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "DIGITAL_MASKING", - "language": "en" -} ---- - - - -## DIGITAL_MASKING - -### description - -#### Syntax - -``` -digital_masking(digital_number) -``` - -Alias function, the original function is `concat(left(id,3),'****',right(id,4))`. - -Desensitizes the input `digital_number` and returns the result after masking desensitization. `digital_number` is `BIGINT` data type. - -### example - -1. 
Desensitize the cell phone number - - ```sql - mysql> select digital_masking(13812345678); - +------------------------------+ - | digital_masking(13812345678) | - +------------------------------+ - | 138****5678 | - +------------------------------+ - ``` - -### keywords - -DIGITAL_MASKING diff --git a/docs/en/docs/sql-manual/sql-functions/distance-functions/cosine-distance.md b/docs/en/docs/sql-manual/sql-functions/distance-functions/cosine-distance.md deleted file mode 100644 index 8c434a764db96a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/distance-functions/cosine-distance.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "cosine_distance", - "language": "en" -} ---- - - - -## cosine_distance - -### description -#### Syntax - -```sql -DOUBLE cosine_distance(ARRAY array1, ARRAY array2) -``` - -Calculates the cosine distance between two vectors (the values of the vectors are the coordinates). -Return NULL if input array is NULL or any element of array is NULL. - -#### Notice -* nested type of input array support: TINYINT, SMALLINT, INT, BIGINT, LARGEINT, FLOAT, DOUBLE -* input array1 and array2 should have the same element size - -### example - -``` -sql> SELECT cosine_distance([1, 2], [2, 3]); -+-------------------------------------------+ -| cosine_distance(ARRAY(1, 2), ARRAY(2, 3)) | -+-------------------------------------------+ -| 0.0077221232863322609 | -+-------------------------------------------+ -``` - -### keywords - COSINE_DISTANCE,DISTANCE,COSINE,ARRAY diff --git a/docs/en/docs/sql-manual/sql-functions/distance-functions/inner-product.md b/docs/en/docs/sql-manual/sql-functions/distance-functions/inner-product.md deleted file mode 100644 index 72bbe051beaf1d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/distance-functions/inner-product.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "inner_product", - "language": "en" -} ---- - - - -## inner_product - -### description -#### Syntax - -```sql -DOUBLE 
inner_product(ARRAY array1, ARRAY array2) -``` - -Calculates the scalar product of two vectors of the same size. -Return NULL if input array is NULL or any element of array is NULL. - -#### Notice -* nested type of input array support: TINYINT, SMALLINT, INT, BIGINT, LARGEINT, FLOAT, DOUBLE -* input array1 and array2 should have the same element size - -### example - -``` -sql> SELECT inner_product([1, 2], [2, 3]); -+-----------------------------------------+ -| inner_product(ARRAY(1, 2), ARRAY(2, 3)) | -+-----------------------------------------+ -| 8 | -+-----------------------------------------+ -``` - -### keywords - INNER_PRODUCT,DISTANCE,ARRAY diff --git a/docs/en/docs/sql-manual/sql-functions/distance-functions/l1-distance.md b/docs/en/docs/sql-manual/sql-functions/distance-functions/l1-distance.md deleted file mode 100644 index e2c2f5db821d36..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/distance-functions/l1-distance.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "l1_distance", - "language": "en" -} ---- - - - -## l1_distance - -### description -#### Syntax - -```sql -DOUBLE l1_distance(ARRAY array1, ARRAY array2) -``` - -Calculates the distance between two points (the values of the vectors are the coordinates) in L1 space. -Return NULL if input array is NULL or any element of array is NULL. 
- -#### Notice -* nested type of input array support: TINYINT, SMALLINT, INT, BIGINT, LARGEINT, FLOAT, DOUBLE -* input array1 and array2 should have the same element size - -### example - -``` -sql> SELECT l1_distance([1, 2], [2, 3]); -+---------------------------------------+ -| l1_distance(ARRAY(1, 2), ARRAY(2, 3)) | -+---------------------------------------+ -| 2 | -+---------------------------------------+ -``` - -### keywords - L1_DISTANCE,DISTANCE,L1,ARRAY diff --git a/docs/en/docs/sql-manual/sql-functions/distance-functions/l2-distance.md b/docs/en/docs/sql-manual/sql-functions/distance-functions/l2-distance.md deleted file mode 100644 index 3df120b6420e58..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/distance-functions/l2-distance.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "l2_distance", - "language": "en" -} ---- - - - -## l2_distance - -### description -#### Syntax - -```sql -DOUBLE l2_distance(ARRAY array1, ARRAY array2) -``` - -Calculates the distance between two points (the values of the vectors are the coordinates) in Euclidean space. -Return NULL if input array is NULL or any element of array is NULL. 
- -#### Notice -* nested type of input array support: TINYINT, SMALLINT, INT, BIGINT, LARGEINT, FLOAT, DOUBLE -* input array1 and array2 should have the same element size - -### example - -``` -sql> SELECT l2_distance([1, 2], [2, 3]); -+---------------------------------------+ -| l2_distance(ARRAY(1, 2), ARRAY(2, 3)) | -+---------------------------------------+ -| 1.4142135623730951 | -+---------------------------------------+ -``` - -### keywords - L2_DISTANCE,DISTANCE,L2,ARRAY diff --git a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/aes.md b/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/aes.md deleted file mode 100644 index 4a763f7f27e22c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/aes.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -{ -"title": "AES", -"language": "en" -} ---- - - - -## AES_ENCRYPT - -### Name - -AES_ENCRYPT - -### description - -Encryption of data using the OpenSSL. This function is consistent with the `AES_ENCRYPT` function in MySQL. Using AES_128_ECB algorithm by default, and the padding mode is PKCS7. -Reference: https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-decrypt - -### Compatibility - -1. aes_decrypt/aes_encrypt/sm4_decrypt/sm4_encrypt When the initial vector is not provided, block_encryption_mode will not take effect, and AES_128_ECB will be used for encryption and decryption in the end, which is inconsistent with the behavior of MySQL. -2. Add aes_decrypt_v2/aes_encrypt_v2/sm4_decrypt_v2/sm4_encrypt_v2 functions to support correct behavior. When no initial vector is provided, block_encryption_mode can take effect, aes-192-ecb and aes-256-ecb will be correctly encrypted and decrypted, and other block encryption modes will report an error. If there is no need to be compatible with old data, the v2 function can be used directly. 
- -#### Syntax - -`AES_ENCRYPT(str, key_str[, init_vector])` - -#### Arguments - -- `str`: Content to be encrypted -- `key_str`: Secret key -- `init_vector`: Initialization Vector. The default value for the block_encryption_mode system variable is aes ecb mode, which does not require an initialization vector. The alternative permitted block encryption modes CBC, CFB1, CFB8, CFB128, and OFB all require an initialization vector. - -#### Return Type - -VARCHAR(*) - -#### Remarks - -The AES_ENCRYPT function is not used the user secret key directly, but will be further processed. The specific steps are as follows: -1. Determine the number of bytes of the SECRET KEY according to the encryption algorithm used. For example, if you using AES_128_ECB, then the number of bytes of SECRET KEY are `128 / 8 = 16`(if using AES_256_ECB, then SECRET KEY length are `128 / 8 = 32`); -2. Then XOR the `i` bit and the `16*k+i` bit of the SECRET KEY entered by the user. If the length of the SECRET KEY less than 16 bytes, 0 will be padded; -3. Finally, use the newly generated key for encryption; - -### example - -```sql -select to_base64(aes_encrypt('text','F3229A0B371ED2D9441B830D21A390C3')); -``` - -The results are consistent with those executed in MySQL. 
- -```text -+--------------------------------+ -| to_base64(aes_encrypt('text')) | -+--------------------------------+ -| wr2JEDVXzL9+2XtRhgIloA== | -+--------------------------------+ -1 row in set (0.01 sec) -``` - -If you want to change other encryption algorithms, you can: - -```sql -set block_encryption_mode="AES_256_CBC"; -select to_base64(aes_encrypt('text','F3229A0B371ED2D9441B830D21A390C3', '0123456789')); -``` - -Here is the result: - -```text -+-----------------------------------------------------+ -| to_base64(aes_encrypt('text', '***', '0123456789')) | -+-----------------------------------------------------+ -| tsmK1HzbpnEdR2//WhO+MA== | -+-----------------------------------------------------+ -1 row in set (0.01 sec) -``` - -For more information about `block_encryption_mode`, see also [variables](../../../advanced/variables.md). - -### keywords - - AES_ENCRYPT - -## AES_DECRYPT - -### Name - -AES_DECRYPT - -### Description - -Decryption of data using the OpenSSL. This function is consistent with the `AES_DECRYPT` function in MySQL. Using AES_128_ECB algorithm by default, and the padding mode is PKCS7. - -#### Syntax - -``` -AES_DECRYPT(str,key_str[,init_vector]) -``` - -#### Arguments - -- `str`: Content that encrypted -- `key_str`: Secret key -- `init_vector`: Initialization Vector - -#### Return Type - -VARCHAR(*) - -### example - -```sql -select aes_decrypt(from_base64('wr2JEDVXzL9+2XtRhgIloA=='),'F3229A0B371ED2D9441B830D21A390C3'); -``` - -The results are consistent with those executed in MySQL. 
- -```text -+------------------------------------------------------+ -| aes_decrypt(from_base64('wr2JEDVXzL9+2XtRhgIloA==')) | -+------------------------------------------------------+ -| text | -+------------------------------------------------------+ -1 row in set (0.01 sec) -``` - -If you want to change other encryption algorithms, you can: - -```sql -set block_encryption_mode="AES_256_CBC"; -select aes_decrypt(from_base64('tsmK1HzbpnEdR2//WhO+MA=='),'F3229A0B371ED2D9441B830D21A390C3', '0123456789'); -``` - -Here is the result: - -```text -+---------------------------------------------------------------------------+ -| aes_decrypt(from_base64('tsmK1HzbpnEdR2//WhO+MA=='), '***', '0123456789') | -+---------------------------------------------------------------------------+ -| text | -+---------------------------------------------------------------------------+ -1 row in set (0.01 sec) -``` - -For more information about `block_encryption_mode`, see also [variables](../../../advanced/variables.md). 
- -### keywords - - AES_DECRYPT diff --git a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/md5.md b/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/md5.md deleted file mode 100644 index 723637544e27dc..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/md5.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ -"title": "MD5", -"language": "en" -} ---- - - - -## MD5 - -### description -Calculates an MD5 128-bit checksum for the string -#### Syntax - -`MD5(str)` - -### example - -``` -MySQL [(none)]> select md5("abc"); -+----------------------------------+ -| md5('abc') | -+----------------------------------+ -| 900150983cd24fb0d6963f7d28e17f72 | -+----------------------------------+ -1 row in set (0.013 sec) -``` - -### keywords - - MD5 \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/md5sum.md b/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/md5sum.md deleted file mode 100644 index a801e1254c9681..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/md5sum.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ -"title": "MD5SUM", -"language": "en" -} ---- - - - -## MD5SUM - -### description -Calculates an MD5 128-bit checksum for the strings -#### Syntax - -`MD5SUM(str[,str])` - -### example - -``` -MySQL > select md5("abcd"); -+----------------------------------+ -| md5('abcd') | -+----------------------------------+ -| e2fc714c4727ee9395f324cd2e7f331f | -+----------------------------------+ -1 row in set (0.011 sec) - -MySQL > select md5sum("ab","cd"); -+----------------------------------+ -| md5sum('ab', 'cd') | -+----------------------------------+ -| e2fc714c4727ee9395f324cd2e7f331f | -+----------------------------------+ -1 row in set (0.008 sec) - -``` - -### keywords - - MD5SUM \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sha.md 
b/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sha.md deleted file mode 100644 index 8e9a4573c1c37c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sha.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ -"title": "SHA", -"language": "en" -} ---- - - - -## SHA - -### description - -Use SHA1 to digest the message. - -#### Syntax - -`SHA(str)` or `SHA1(str)` - -#### Arguments - -- `str`: content to be encrypted - -### example - -```SQL -mysql> select sha("123"); -+------------------------------------------+ -| sha1('123') | -+------------------------------------------+ -| 40bd001563085fc35165329ea1ff5c5ecbdbbeef | -+------------------------------------------+ -1 row in set (0.13 sec) -``` - -### keywords - - SHA,SHA1 diff --git a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sha2.md b/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sha2.md deleted file mode 100644 index 2f7f838b1ffba7..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sha2.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -{ -"title": "SHA2", -"language": "en" -} ---- - - - -## SHA2 - -### description - -Use SHA2 to digest the message. 
- -#### Syntax - -`SHA2(str, digest_length)` - -#### Arguments - -- `str`: content to be encrypted -- `digest_length`: the length of the digest - -### example - -```SQL -mysql> select sha2('abc', 224); -+----------------------------------------------------------+ -| sha2('abc', 224) | -+----------------------------------------------------------+ -| 23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7 | -+----------------------------------------------------------+ -1 row in set (0.13 sec) - -mysql> select sha2('abc', 384); -+--------------------------------------------------------------------------------------------------+ -| sha2('abc', 384) | -+--------------------------------------------------------------------------------------------------+ -| cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7 | -+--------------------------------------------------------------------------------------------------+ -1 row in set (0.13 sec) - -mysql> select sha2(NULL, 512); -+-----------------+ -| sha2(NULL, 512) | -+-----------------+ -| NULL | -+-----------------+ -1 row in set (0.09 sec) -``` - -### keywords - - SHA2 diff --git a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sm3.md b/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sm3.md deleted file mode 100644 index 6dc63cdb9cd3cc..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sm3.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ -"title": "SM3", -"language": "en" -} ---- - - - -## SM3 - -### description -Calculates an SM3 256-bit checksum for the string -#### Syntax - -`SM3(str)` - -### example - -``` -MySQL > select sm3("abcd"); -+------------------------------------------------------------------+ -| sm3('abcd') | -+------------------------------------------------------------------+ -| 82ec580fe6d36ae4f81cae3c73f4a5b3b5a09c943172dc9053c69fd8e18dca1e | 
-+------------------------------------------------------------------+ -1 row in set (0.009 sec) -``` - -### keywords - - SM3 \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sm3sum.md b/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sm3sum.md deleted file mode 100644 index b8b940a1734bfb..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sm3sum.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ -"title": "SM3SUM", -"language": "en" -} ---- - - - -## SM3SUM - -### description -Calculates an SM3 128-bit checksum for the strings -#### Syntax - -`SM3SUM(str[,str])` - -### example - -``` -MySQL > select sm3("abcd"); -+------------------------------------------------------------------+ -| sm3('abcd') | -+------------------------------------------------------------------+ -| 82ec580fe6d36ae4f81cae3c73f4a5b3b5a09c943172dc9053c69fd8e18dca1e | -+------------------------------------------------------------------+ -1 row in set (0.009 sec) - -MySQL > select sm3sum("ab","cd"); -+------------------------------------------------------------------+ -| sm3sum('ab', 'cd') | -+------------------------------------------------------------------+ -| 82ec580fe6d36ae4f81cae3c73f4a5b3b5a09c943172dc9053c69fd8e18dca1e | -+------------------------------------------------------------------+ -1 row in set (0.009 sec) - -``` - -### keywords - - SM3SUM \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sm4.md b/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sm4.md deleted file mode 100644 index ec1f05f20db968..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/encrypt-digest-functions/sm4.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -{ -"title": "SM4", -"language": "en" -} ---- - - - -## SM4_ENCRYPT - -### description - -#### Syntax - -`VARCHAR SM4_ENCRYPT(str,key_str[,init_vector])` - -return encrypted result - -### example - -``` 
-MySQL > select TO_BASE64(SM4_ENCRYPT('text','F3229A0B371ED2D9441B830D21A390C3')); -+--------------------------------+ -| to_base64(sm4_encrypt('text')) | -+--------------------------------+ -| aDjwRflBrDjhBZIOFNw3Tg== | -+--------------------------------+ -1 row in set (0.010 sec) - -MySQL > set block_encryption_mode="SM4_128_CBC"; -Query OK, 0 rows affected (0.001 sec) - -MySQL > select to_base64(SM4_ENCRYPT('text','F3229A0B371ED2D9441B830D21A390C3', '0123456789')); -+----------------------------------------------------------------------------------+ -| to_base64(sm4_encrypt('text', 'F3229A0B371ED2D9441B830D21A390C3', '0123456789')) | -+----------------------------------------------------------------------------------+ -| G7yqOKfEyxdagboz6Qf01A== | -+----------------------------------------------------------------------------------+ -1 row in set (0.014 sec) -``` - -### keywords - - SM4_ENCRYPT - -## SM4_DECRYPT - -### description - -#### Syntax - -`VARCHAR SM4_DECRYPT(str,key_str[,init_vector])` - -Return the decrypted result - -### example - -``` -MySQL [(none)]> select SM4_DECRYPT(FROM_BASE64('aDjwRflBrDjhBZIOFNw3Tg=='),'F3229A0B371ED2D9441B830D21A390C3'); -+------------------------------------------------------+ -| sm4_decrypt(from_base64('aDjwRflBrDjhBZIOFNw3Tg==')) | -+------------------------------------------------------+ -| text | -+------------------------------------------------------+ -1 row in set (0.009 sec) - -MySQL> set block_encryption_mode="SM4_128_CBC"; -Query OK, 0 rows affected (0.006 sec) - -MySQL > select SM4_DECRYPT(FROM_BASE64('G7yqOKfEyxdagboz6Qf01A=='),'F3229A0B371ED2D9441B830D21A390C3', '0123456789'); -+--------------------------------------------------------------------------------------------------------+ -| sm4_decrypt(from_base64('G7yqOKfEyxdagboz6Qf01A=='), 'F3229A0B371ED2D9441B830D21A390C3', '0123456789') | -+--------------------------------------------------------------------------------------------------------+ -| text | 
-+--------------------------------------------------------------------------------------------------------+ -1 row in set (0.012 sec) -``` - -### keywords - - SM4_DECRYPT diff --git a/docs/en/docs/sql-manual/sql-functions/hash-functions/murmur-hash3-32.md b/docs/en/docs/sql-manual/sql-functions/hash-functions/murmur-hash3-32.md deleted file mode 100644 index 051a5c262ff5ac..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/hash-functions/murmur-hash3-32.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -{ - "title": "MURMUR_HASH3_32", - "language": "en" -} ---- - - - -## murmur_hash3_32 - -### description -#### Syntax - -`INT MURMUR_HASH3_32(VARCHAR input, ...)` - -Return the 32 bits murmur3 hash of input string. - -Note: When calculating hash values, it is more recommended to use `xxhash_32` instead of `murmur_hash3_32`. - -### example - -``` -mysql> select murmur_hash3_32(null); -+-----------------------+ -| murmur_hash3_32(NULL) | -+-----------------------+ -| NULL | -+-----------------------+ - -mysql> select murmur_hash3_32("hello"); -+--------------------------+ -| murmur_hash3_32('hello') | -+--------------------------+ -| 1321743225 | -+--------------------------+ - -mysql> select murmur_hash3_32("hello", "world"); -+-----------------------------------+ -| murmur_hash3_32('hello', 'world') | -+-----------------------------------+ -| 984713481 | -+-----------------------------------+ -``` - -### keywords - - MURMUR_HASH3_32,HASH diff --git a/docs/en/docs/sql-manual/sql-functions/hash-functions/murmur-hash3-64.md b/docs/en/docs/sql-manual/sql-functions/hash-functions/murmur-hash3-64.md deleted file mode 100644 index fb9d1dd62173c8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/hash-functions/murmur-hash3-64.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -{ - "title": "MURMUR_HASH3_64", - "language": "en" -} ---- - - - -## murmur_hash3_64 - -### description -#### Syntax - -`BIGINT MURMUR_HASH3_64(VARCHAR input, ...)` - -Return the 64 bits murmur3 hash of 
input string. - -Note: When calculating hash values, it is more recommended to use `xxhash_64` instead of `murmur_hash3_64`. - -### example - -``` -mysql> select murmur_hash3_64(null); -+-----------------------+ -| murmur_hash3_64(NULL) | -+-----------------------+ -| NULL | -+-----------------------+ - -mysql> select murmur_hash3_64("hello"); -+--------------------------+ -| murmur_hash3_64('hello') | -+--------------------------+ -| -3215607508166160593 | -+--------------------------+ - -mysql> select murmur_hash3_64("hello", "world"); -+-----------------------------------+ -| murmur_hash3_64('hello', 'world') | -+-----------------------------------+ -| 3583109472027628045 | -+-----------------------------------+ -``` - -### keywords - - MURMUR_HASH3_64,HASH diff --git a/docs/en/docs/sql-manual/sql-functions/hash-functions/xxhash-32.md b/docs/en/docs/sql-manual/sql-functions/hash-functions/xxhash-32.md deleted file mode 100644 index 3707d7a70c3045..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/hash-functions/xxhash-32.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -{ - "title": "XXHASH_32", - "language": "en" -} ---- - - - -## xxhash_32 - -### description -#### Syntax - -`INT XXHASH_32(VARCHAR input, ...)` - -Return the 32 bits xxhash of input string. - -Note: When calculating hash values, it is more recommended to use `xxhash_32` instead of `murmur_hash3_32`. 
- -### example - -``` -mysql> select xxhash_32(NULL); -+-----------------+ -| xxhash_32(NULL) | -+-----------------+ -| NULL | -+-----------------+ - -mysql> select xxhash_32("hello"); -+--------------------+ -| xxhash_32('hello') | -+--------------------+ -| -83855367 | -+--------------------+ - -mysql> select xxhash_32("hello", "world"); -+-----------------------------+ -| xxhash_32('hello', 'world') | -+-----------------------------+ -| -920844969 | -+-----------------------------+ -``` - -### keywords - -XXHASH_32,HASH diff --git a/docs/en/docs/sql-manual/sql-functions/hash-functions/xxhash-64.md b/docs/en/docs/sql-manual/sql-functions/hash-functions/xxhash-64.md deleted file mode 100644 index 506613177e9f63..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/hash-functions/xxhash-64.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -{ - "title": "XXHASH_64", - "language": "en" -} ---- - - - -## xxhash_64 - -### description -#### Syntax - -`BIGINT XXHASH_64(VARCHAR input, ...)` - -Return the 64 bits xxhash of input string. - -Note: When calculating hash values, it is more recommended to use `xxhash_64` instead of `murmur_hash3_64`. - -### example - -``` -mysql> select xxhash_64(NULL); -+-----------------+ -| xxhash_64(NULL) | -+-----------------+ -| NULL | -+-----------------+ - -mysql> select xxhash_64("hello"); -+----------------------+ -| xxhash_64('hello') | -+----------------------+ -| -7685981735718036227 | -+----------------------+ - -mysql> select xxhash_64("hello", "world"); -+-----------------------------+ -| xxhash_64('hello', 'world') | -+-----------------------------+ -| 7001965798170371843 | -+-----------------------------+ -``` - -### benchmark - -Through TPCH Benchmark testing, it was found that `xxhash_64` has significantly improved performance compared to `murmur_hash3_64`. Therefore, in scenarios where hash values need to be calculated, it is more recommended to use `xxhash_64`. 
- -``` -mysql> select count(murmur_hash3_64(l_comment)) from lineitem; -+-----------------------------------+ -| count(murmur_hash3_64(l_comment)) | -+-----------------------------------+ -| 600037902 | -+-----------------------------------+ -1 row in set (17.18 sec) - -mysql> select count(xxhash_64(l_comment)) from lineitem; -+-----------------------------+ -| count(xxhash_64(l_comment)) | -+-----------------------------+ -| 600037902 | -+-----------------------------+ -1 row in set (8.41 sec) -``` - -### keywords - -XXHASH_64,HASH diff --git a/docs/en/docs/sql-manual/sql-functions/hll-functions/hll-cardinality.md b/docs/en/docs/sql-manual/sql-functions/hll-functions/hll-cardinality.md deleted file mode 100644 index db1b7437ca3957..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/hll-functions/hll-cardinality.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -{ - "title": "HLL_CARDINALITY", - "language": "en" -} ---- - - - -## HLL_CARDINALITY -### description -#### Syntax - -`HLL_CARDINALITY(hll)` - -HLL_CARDINALITY is used to calculate the cardinality of a single HLL type value. - -### example -``` -MySQL > select HLL_CARDINALITY(uv_set) from test_uv; -+---------------------------+ -| hll_cardinality(`uv_set`) | -+---------------------------+ -| 3 | -+---------------------------+ -``` -### keywords -HLL,HLL_CARDINALITY diff --git a/docs/en/docs/sql-manual/sql-functions/hll-functions/hll-empty.md b/docs/en/docs/sql-manual/sql-functions/hll-functions/hll-empty.md deleted file mode 100644 index 776a7c3722ec14..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/hll-functions/hll-empty.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -{ - "title": "HLL_EMPTY", - "language": "en" -} ---- - - - -## HLL_EMPTY -### description -#### Syntax - -`HLL_EMPTY(value)` - -HLL_EMPTY returns a null value of type hll. 
- -### example -``` -MySQL > select hll_cardinality(hll_empty()); -+------------------------------+ -| hll_cardinality(hll_empty()) | -+------------------------------+ -| 0 | -+------------------------------+ -``` -### keywords -HLL,HLL_EMPTY diff --git a/docs/en/docs/sql-manual/sql-functions/hll-functions/hll-hash.md b/docs/en/docs/sql-manual/sql-functions/hll-functions/hll-hash.md deleted file mode 100644 index b174e6236e8f93..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/hll-functions/hll-hash.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ - "title": "HLL_HASH", - "language": "en" -} ---- - - - -## HLL_HASH -### description -#### Syntax - -`HLL_HASH(value)` - -HLL_HASH converts a value to hll type. -Typically used when loading data. - -### example -``` -MySQL > select HLL_CARDINALITY(HLL_HASH('abc')); -+----------------------------------+ -| hll_cardinality(HLL_HASH('abc')) | -+----------------------------------+ -| 1 | -+----------------------------------+ -``` -### keywords -HLL,HLL_HASH diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/inet-aton.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/inet-aton.md deleted file mode 100644 index 56f9308f347720..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/inet-aton.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -{ -"title": "INET_ATON", -"language": "en" -} ---- - - - -## INET_ATON - - - -inet_aton - - - -### description - -#### Syntax - -`BIGINT INET_ATON(VARCHAR ipv4_string)` - -Takes a string containing an IPv4 address in the format A.B.C.D (dot-separated numbers in decimal form). Returns a BIGINT number representing the corresponding IPv4 address in big endian. - -### notice - -`It is the alias of ipv4_string_to_num_or_null. 
-It will return NULL if the input string is not a valid IP address or NULL, which is the same with MySQL` - -### example -``` -mysql> select inet_aton('192.168.0.1'); -+-------------------------------------------+ -| ipv4_string_to_num_or_null('192.168.0.1') | -+-------------------------------------------+ -| 3232235521 | -+-------------------------------------------+ -1 row in set (0.01 sec) - -mysql> SELECT inet_aton('192.168'); -+---------------------------------------+ -| ipv4_string_to_num_or_null('192.168') | -+---------------------------------------+ -| NULL | -+---------------------------------------+ -1 row in set (0.01 sec) -``` - -### keywords - -INET_ATON, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/inet-ntoa.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/inet-ntoa.md deleted file mode 100644 index da8876c1159095..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/inet-ntoa.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -{ -"title": "INET_NTOA", -"language": "en" -} ---- - - - -## INET_NTOA - - - -INET_NTOA - - - -### description - -#### Syntax - -`VARCHAR INET_NTOA(BIGINT ipv4_num)` - -Takes a Int16、Int32、Int64 number. Interprets it as an IPv4 address in big endian. Returns a string containing the corresponding IPv4 address in the format A.B.C.d (dot-separated numbers in decimal form). 
-### notice - -`will return NULL if the input parameter is negative or larger than 4294967295(num value of '255.255.255.255')` - -### example - -``` -mysql> select inet_ntoa(3232235521); -+-----------------------------+ -| ipv4numtostring(3232235521) | -+-----------------------------+ -| 192.168.0.1 | -+-----------------------------+ -1 row in set (0.01 sec) - -mysql> select num,inet_ntoa(num) from ipv4_bi; -+------------+------------------------+ -| num | ipv4numtostring(`num`) | -+------------+------------------------+ -| -1 | NULL | -| 0 | 0.0.0.0 | -| 2130706433 | 127.0.0.1 | -| 4294967295 | 255.255.255.255 | -| 4294967296 | NULL | -+------------+------------------------+ -7 rows in set (0.01 sec) -``` - -### keywords - -INET_NTOA, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/inet6-aton.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/inet6-aton.md deleted file mode 100644 index 25f60b2cb8c646..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/inet6-aton.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -{ -"title": "INET6_ATON", -"language": "en" -} ---- - - - -## INET6_ATON - - - -inet6_aton - - - -### description - -#### Syntax - -`VARCHAR INET6_ATON(VARCHAR ipv6_string)` - -The reverse function of IPv6NumToString, it takes an IP address String and returns an IPv6 address in binary format. -If the input string contains a valid IPv4 address, returns its IPv6 equivalent. - -### notice - -`It is the alias of ipv6_string_to_num_or_null. 
-It will return NULL if the input string is not a valid IP address or NULL, which is the same with MySQL` - -### example -``` -mysql> select hex(inet6_aton('1111::ffff')); -+-----------------------------------------------+ -| hex(ipv6_string_to_num_or_null('1111::ffff')) | -+-----------------------------------------------+ -| 1111000000000000000000000000FFFF | -+-----------------------------------------------+ -1 row in set (0.02 sec) - -mysql> select hex(inet6_aton('192.168.0.1')); -+------------------------------------------------+ -| hex(ipv6_string_to_num_or_null('192.168.0.1')) | -+------------------------------------------------+ -| 00000000000000000000FFFFC0A80001 | -+------------------------------------------------+ -1 row in set (0.02 sec) - -mysql> select hex(inet6_aton('notaaddress')); -+--------------------------------------------------+ -| hex(ipv6_string_to_num_or_null('notaaddress')) | -+--------------------------------------------------+ -| NULL | -+--------------------------------------------------+ -1 row in set (0.02 sec) -``` - -### keywords - -INET6_ATON, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/inet6-ntoa.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/inet6-ntoa.md deleted file mode 100644 index 638800ec44e2a5..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/inet6-ntoa.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ -"title": "INET6_NTOA", -"language": "en" -} ---- - - - -## INET6_NTOA - - - -INET6_NTOA - - - -### description - -#### Syntax - -`VARCHAR INET6_NTOA(VARCHAR ipv6_num)` - -Takes an IPv6 address in binary format of type String. Returns the string of this address in text format. -The IPv4 address mapped by IPv6 starts with ::ffff:111.222.33. 
- -### example - -``` -mysql> select inet6_ntoa(unhex('2A0206B8000000000000000000000011')) as addr; -+--------------+ -| addr | -+--------------+ -| 2a02:6b8::11 | -+--------------+ -1 row in set (0.01 sec) -``` - -### keywords - -INET6_NTOA, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-cidr-to-range.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-cidr-to-range.md deleted file mode 100644 index f5367a577f0599..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-cidr-to-range.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -{ -"title": "IPV4_CIDR_TO_RANGE", -"language": "en" -} ---- - - - -## IPV4_CIDR_TO_RANGE - - - -IPV4_CIDR_TO_RANGE - - - -### description - -#### Syntax - -`STRUCT IPV4_CIDR_TO_RANGE(IPV4 ip_v4, INT16 cidr)` - -Receive an IPv4 and an Int16 value containing CIDR. Returns a struct that contains two IPv4 fields representing the lower range (min) and higher range (max) of the subnet, respectively. 
- -### notice - -`If the input parameter is NULL, return NULL, indicating invalid input` - -### example - -``` -mysql> SELECT ipv4_cidr_to_range(ipv4_string_to_num('192.168.5.2'), 16); -+-----------------------------------------------------------+ -| ipv4_cidr_to_range(ipv4_string_to_num('192.168.5.2'), 16) | -+-----------------------------------------------------------+ -| {"min": "192.168.0.0", "max": "192.168.255.255"} | -+-----------------------------------------------------------+ - -mysql> SELECT ipv4_cidr_to_range(to_ipv4('192.168.5.2'), 16); -+--------------------------------------------------+ -| ipv4_cidr_to_range(to_ipv4('192.168.5.2'), 16) | -+--------------------------------------------------+ -| {"min": "192.168.0.0", "max": "192.168.255.255"} | -+--------------------------------------------------+ - -mysql> SELECT ipv4_cidr_to_range(NULL, NULL); -+--------------------------------+ -| ipv4_cidr_to_range(NULL, NULL) | -+--------------------------------+ -| NULL | -+--------------------------------+ -``` - -### keywords - -IPV4_CIDR_TO_RANGE, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-num-to-string.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-num-to-string.md deleted file mode 100644 index 6e30c901a80f96..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-num-to-string.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -{ -"title": "IPV4_NUM_TO_STRING", -"language": "en" -} ---- - - - -## IPV4_NUM_TO_STRING - - - -IPV4_NUM_TO_STRING - - - -### description - -#### Syntax - -`VARCHAR IPV4_NUM_TO_STRING(BIGINT ipv4_num)` - -Takes a Int16、Int32、Int64 number. Interprets it as an IPv4 address in big endian. Returns a string containing the corresponding IPv4 address in the format A.B.C.d (dot-separated numbers in decimal form). 
-### notice - -`will return NULL if the input parameter is negative or larger than 4294967295(num value of '255.255.255.255')` - -### example - -``` -mysql> select ipv4_num_to_string(3232235521); -+--------------------------------+ -| ipv4_num_to_string(3232235521) | -+--------------------------------+ -| 192.168.0.1 | -+--------------------------------+ -1 row in set (0.01 sec) - -mysql> select num,ipv4_num_to_string(num) from ipv4_bi; -+------------+---------------------------+ -| num | ipv4_num_to_string(`num`) | -+------------+---------------------------+ -| -1 | NULL | -| 0 | 0.0.0.0 | -| 2130706433 | 127.0.0.1 | -| 4294967295 | 255.255.255.255 | -| 4294967296 | NULL | -+------------+---------------------------+ -7 rows in set (0.01 sec) -``` - -### keywords - -IPV4_NUM_TO_STRING, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-string-to-num-or-default.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-string-to-num-or-default.md deleted file mode 100644 index 880cda28a5490a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-string-to-num-or-default.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -{ -"title": "IPV4_STRING_TO_NUM_OR_DEFAULT", -"language": "en" -} ---- - - - -## IPV4_STRING_TO_NUM_OR_DEFAULT - - - -IPV4_STRING_TO_NUM_OR_DEFAULT - - - -### description - -#### Syntax - -`BIGINT IPV4_STRING_TO_NUM_OR_DEFAULT(VARCHAR ipv4_string)` - -Takes a string containing an IPv4 address in the format A.B.C.D (dot-separated numbers in decimal form). Returns a BIGINT number representing the corresponding IPv4 address in big endian. 
- -### notice - -`will return 0 if the input parameter is invalid ipv4 value or NULL` - -### example -``` -mysql> select ipv4_string_to_num_or_default('192.168.0.1'); -+----------------------------------------------+ -| ipv4_string_to_num_or_default('192.168.0.1') | -+----------------------------------------------+ -| 3232235521 | -+----------------------------------------------+ -1 row in set (0.01 sec) - -mysql> select str, ipv4_string_to_num_or_default(str) from ipv4_str; -+-----------------+------------------------------------+ -|str | ipv4_string_to_num_or_default(str) | -+-----------------+------------------------------------+ -| 0.0.0.0 | 0 | -| 127.0.0.1 | 2130706433 | -| 255.255.255.255 | 4294967295 | -| invalid | 0 | -+-----------------+------------------------------------+ -4 rows in set (0.01 sec) - -mysql> select addr_src, ipv4_string_to_num_or_default(addr_src) from ipv4_string_test where addr_src is null; -+----------+-----------------------------------------+ -| addr_src | ipv4_string_to_num_or_default(addr_src) | -+----------+-----------------------------------------+ -| NULL | 0 | -+----------+-----------------------------------------+ -1 row in set (0.09 sec) -``` - -### keywords - -IPV4_STRING_TO_NUM_OR_DEFAULT, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-string-to-num-or-null.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-string-to-num-or-null.md deleted file mode 100644 index aa24a9523b4811..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-string-to-num-or-null.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -{ -"title": "IPV4_STRING_TO_NUM_OR_NULL", -"language": "en" -} ---- - - - -## IPV4_STRING_TO_NUM_OR_NULL - - - -IPV4_STRING_TO_NUM_OR_NULL - - - -### description - -#### Syntax - -`BIGINT IPV4_STRING_TO_NUM_OR_NULL(VARCHAR ipv4_string)` - -Takes a string containing an IPv4 address in the format A.B.C.D (dot-separated numbers in decimal form). 
Returns a BIGINT number representing the corresponding IPv4 address in big endian. - -### notice - -`will return null if the input parameter is invalid ipv4 value` - -### example -``` -mysql> select ipv4_string_to_num_or_null('192.168.0.1'); -+-------------------------------------------+ -| ipv4_string_to_num_or_null('192.168.0.1') | -+-------------------------------------------+ -| 3232235521 | -+-------------------------------------------+ -1 row in set (0.01 sec) - -mysql> select str, ipv4_string_to_num_or_null(str) from ipv4_str; -+-----------------+---------------------------------+ -|str | ipv4_string_to_num_or_null(str) | -+-----------------+---------------------------------+ -| 0.0.0.0 | 0 | -| 127.0.0.1 | 2130706433 | -| 255.255.255.255 | 4294967295 | -| invalid | NULL | -+-----------------+---------------------------------+ -4 rows in set (0.01 sec) -``` - -### keywords - -IPV4_STRING_TO_NUM_OR_NULL, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-string-to-num.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-string-to-num.md deleted file mode 100644 index ed69ede28608c0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv4-string-to-num.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -{ -"title": "IPV4_STRING_TO_NUM", -"language": "en" -} ---- - - - -## IPV4_STRING_TO_NUM - - - -IPV4_STRING_TO_NUM - - - -### description - -#### Syntax - -`BIGINT IPV4_STRING_TO_NUM(VARCHAR ipv4_string)` - -Takes a string containing an IPv4 address in the format A.B.C.D (dot-separated numbers in decimal form). Returns a BIGINT number representing the corresponding IPv4 address in big endian. 
- -### notice - -`will return an error if the input string is not a valid IPv4 address or NULL` - -### example -``` -mysql> select ipv4_string_to_num('192.168.0.1'); -+-----------------------------------+ -| ipv4_string_to_num('192.168.0.1') | -+-----------------------------------+ -| 3232235521 | -+-----------------------------------+ -1 row in set (0.01 sec) - -mysql> select ipv4_string_to_num('invalid'); -ERROR 1105 (HY000): errCode = 2, detailMessage = (172.17.0.2)[CANCELLED][INVALID_ARGUMENT][E33] Invalid IPv4 value - -mysql> select addr_src, ipv4_string_to_num(addr_src) from ipv4_string_test where addr_src is null; -ERROR 1105 (HY000): errCode = 2, detailMessage = (172.17.0.2)[CANCELLED][E33] Null Input, you may consider convert it to a valid default IPv4 value like '0.0.0.0' first -``` - -### keywords - -IPV4_STRING_TO_NUM, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-cidr-to-range.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-cidr-to-range.md deleted file mode 100644 index bf5c74c167d272..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-cidr-to-range.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -{ -"title": "IPV6_CIDR_TO_RANGE", -"language": "en" -} ---- - - - -## IPV6_CIDR_TO_RANGE - - - -IPV6_CIDR_TO_RANGE - - - -### description - -#### Syntax - -`STRUCT IPV6_CIDR_TO_RANGE(IPV6 ip_v6, INT16 cidr)` - -Receive an IPv6 and an Int16 value containing CIDR. Returns a struct that contains two IPv6 fields representing the lower range (min) and higher range (max) of the subnet, respectively. 
- -### notice - -`If the input parameter is NULL, return NULL, indicating invalid input` - -### example - -``` -mysql> SELECT ipv6_cidr_to_range(ipv6_string_to_num('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); -+---------------------------------------------------------------------------------------+ -| ipv6_cidr_to_range(ipv6_string_to_num('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32) | -+---------------------------------------------------------------------------------------+ -| {"min": "2001:db8::", "max": "2001:db8:ffff:ffff:ffff:ffff:ffff:ffff"} | -+---------------------------------------------------------------------------------------+ - -mysql> SELECT ipv6_cidr_to_range(to_ipv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); -+----------------------------------------------------------------------------+ -| ipv6_cidr_to_range(to_ipv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32) | -+----------------------------------------------------------------------------+ -| {"min": "2001:db8::", "max": "2001:db8:ffff:ffff:ffff:ffff:ffff:ffff"} | -+----------------------------------------------------------------------------+ - -mysql> SELECT ipv6_cidr_to_range(NULL, NULL); -+--------------------------------+ -| ipv6_cidr_to_range(NULL, NULL) | -+--------------------------------+ -| NULL | -+--------------------------------+ -``` - -### keywords - -IPV6_CIDR_TO_RANGE, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-num-to-string.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-num-to-string.md deleted file mode 100644 index 9e01a67cb1c048..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-num-to-string.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ -"title": "IPV6_NUM_TO_STRING", -"language": "en" -} ---- - - - -## IPV6_NUM_TO_STRING - - - -IPV6_NUM_TO_STRING - - - -### description - -#### Syntax - -`VARCHAR IPV6_NUM_TO_STRING(VARCHAR ipv6_num)` - -Takes an IPv6 address in binary format of type String. 
Returns the string of this address in text format. -The IPv4 address mapped by IPv6 starts with ::ffff:111.222.33. - -### example - -``` -mysql> select ipv6_num_to_string(unhex('2A0206B8000000000000000000000011')) as addr; -+--------------+ -| addr | -+--------------+ -| 2a02:6b8::11 | -+--------------+ -1 row in set (0.01 sec) -``` - -### keywords - -IPV6_NUM_TO_STRING, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-string-to-num-or-default.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-string-to-num-or-default.md deleted file mode 100644 index 30c3455adf677c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-string-to-num-or-default.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -{ -"title": "IPV6_STRING_TO_NUM_OR_DEFAULT", -"language": "en" -} ---- - - - -## IPV6_STRING_TO_NUM_OR_DEFAULT - - - -IPV6_STRING_TO_NUM_OR_DEFAULT - - - -### description - -#### Syntax - -`VARCHAR IPV6_STRING_TO_NUM_OR_DEFAULT(VARCHAR ipv6_string)` - -The reverse function of IPv6NumToString, it takes an IP address String and returns an IPv6 address in binary format. -If the input string contains a valid IPv4 address, returns its IPv6 equivalent. 
- -### notice - -`will return zero if the input string is not a valid IP address` - -### example -``` -mysql> select hex(ipv6_string_to_num_or_default('1111::ffff')); -+--------------------------------------------------+ -| hex(ipv6_string_to_num_or_default('1111::ffff')) | -+--------------------------------------------------+ -| 1111000000000000000000000000FFFF | -+--------------------------------------------------+ -1 row in set (0.01 sec) - -mysql> select hex(ipv6_string_to_num_or_default('192.168.0.1')); -+---------------------------------------------------+ -| hex(ipv6_string_to_num_or_default('192.168.0.1')) | -+---------------------------------------------------+ -| 00000000000000000000FFFFC0A80001 | -+---------------------------------------------------+ -1 row in set (0.02 sec) - -mysql> select hex(ipv6_string_to_num_or_default('notaaddress')); -+---------------------------------------------------+ -| hex(ipv6_string_to_num_or_default('notaaddress')) | -+---------------------------------------------------+ -| 00000000000000000000000000000000 | -+---------------------------------------------------+ -1 row in set (0.02 sec) -``` - -### keywords - -IPV6_STRING_TO_NUM_OR_DEFAULT, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-string-to-num-or-null.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-string-to-num-or-null.md deleted file mode 100644 index 9df5f11414c3d7..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-string-to-num-or-null.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -{ -"title": "IPV6_STRING_TO_NUM_OR_NULL", -"language": "en" -} ---- - - - -## IPV6_STRING_TO_NUM_OR_NULL - - - -IPV6_STRING_TO_NUM_OR_NULL - - - -### description - -#### Syntax - -`VARCHAR IPV6_STRING_TO_NUM_OR_NULL(VARCHAR ipv6_string)` - -The reverse function of IPv6NumToString, it takes an IP address String and returns an IPv6 address in binary format. 
-If the input string contains a valid IPv4 address, returns its IPv6 equivalent. - -### notice - -`will return NULL if the input string is not a valid IP address` - -### example -``` -mysql> select hex(ipv6_string_to_num_or_null('1111::ffff')); -+-----------------------------------------------+ -| hex(ipv6_string_to_num_or_null('1111::ffff')) | -+-----------------------------------------------+ -| 1111000000000000000000000000FFFF | -+-----------------------------------------------+ -1 row in set (0.01 sec) - -mysql> select hex(ipv6_string_to_num_or_null('192.168.0.1')); -+------------------------------------------------+ -| hex(ipv6_string_to_num_or_null('192.168.0.1')) | -+------------------------------------------------+ -| 00000000000000000000FFFFC0A80001 | -+------------------------------------------------+ -1 row in set (0.02 sec) - -mysql> select hex(ipv6_string_to_num_or_null('notaaddress')); -+------------------------------------------------+ -| hex(ipv6_string_to_num_or_null('notaaddress')) | -+------------------------------------------------+ -| NULL | -+------------------------------------------------+ -1 row in set (0.02 sec) -``` - -### keywords - -IPV6_STRING_TO_NUM_OR_NULL, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-string-to-num.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-string-to-num.md deleted file mode 100644 index 68f7738d04f95b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/ipv6-string-to-num.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -{ -"title": "IPV6_STRING_TO_NUM", -"language": "en" -} ---- - - - -## IPV6_STRING_TO_NUM - - - -IPV6_STRING_TO_NUM - - - -### description - -#### Syntax - -`VARCHAR IPV6_STRING_TO_NUM(VARCHAR ipv6_string)` - -The reverse function of IPv6NumToString, it takes an IP address String and returns an IPv6 address in binary format. -If the input string contains a valid IPv4 address, returns its IPv6 equivalent. 
- -### notice - -`will return an error if the input string is not a valid IP address or NULL` - -### example -``` -mysql> select hex(ipv6_string_to_num('1111::ffff')); -+---------------------------------------+ -| hex(ipv6_string_to_num('1111::ffff')) | -+---------------------------------------+ -| 1111000000000000000000000000FFFF | -+---------------------------------------+ -1 row in set (0.02 sec) - -mysql> select hex(ipv6_string_to_num('192.168.0.1')); -+----------------------------------------+ -| hex(ipv6_string_to_num('192.168.0.1')) | -+----------------------------------------+ -| 00000000000000000000FFFFC0A80001 | -+----------------------------------------+ -1 row in set (0.02 sec) - -mysql> select hex(ipv6_string_to_num('notaaddress')); -ERROR 1105 (HY000): errCode = 2, detailMessage = (172.17.0.2)[CANCELLED][E33] Invalid IPv6 value - -mysql> select addr_src, hex(ipv6_string_to_num(addr_src)) from ipv4_string_test where addr_src is null; -ERROR 1105 (HY000): errCode = 2, detailMessage = (172.17.0.2)[CANCELLED][E33] Null Input, you may consider convert it to a valid default IPv6 value like '::' first -``` - -### keywords - -IPV6_STRING_TO_NUM, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ip-address-in-range.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ip-address-in-range.md deleted file mode 100644 index 7a47537f85526a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ip-address-in-range.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -{ -"title": "IS_IP_ADDRESS_IN_RANGE", -"language": "en" -} ---- - - - -## IS_IP_ADDRESS_IN_RANGE - - - -IS_IP_ADDRESS_IN_RANGE - - - -### description - -#### Syntax - -`BOOLEAN IS_IP_ADDRESS_IN_RANGE(STRING ip_str, STRING cidr_prefix)` - -Determine whether the IP (IPv4 or IPv6) address is included in the network represented by CIDR notation. If yes, return true; otherwise, return false. 
- -### notice - -`ip_str and cidr_prefix both cannot be NULL` - -### example - -``` -mysql> SELECT is_ip_address_in_range('127.0.0.1', '127.0.0.0/8'); -+----------------------------------------------------+ -| is_ip_address_in_range('127.0.0.1', '127.0.0.0/8') | -+----------------------------------------------------+ -| 1 | -+----------------------------------------------------+ - -mysql> SELECT is_ip_address_in_range('::ffff:192.168.0.1', '::ffff:192.168.0.4/128'); -+------------------------------------------------------------------------+ -| is_ip_address_in_range('::ffff:192.168.0.1', '::ffff:192.168.0.4/128') | -+------------------------------------------------------------------------+ -| 0 | -+------------------------------------------------------------------------+ -``` - -### keywords - -IS_IP_ADDRESS_IN_RANGE, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv4-compat.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv4-compat.md deleted file mode 100644 index 84c5aff9b63546..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv4-compat.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -{ -"title": "IS_IPV4_COMPAT", -"language": "en" -} ---- - - - -## IS_IPV4_COMPAT - - - -IS_IPV4_COMPAT - - - -### description - -#### Syntax - -`VARCHAR IS_IPV4_COMPAT(INET6_ATON(VARCHAR ipv4_addr))` - -This function takes an IPv6 address represented in numeric form as a binary string, as returned by INET6_ATON(). -It returns 1 if the argument is a valid IPv4-compatible IPv6 address, 0 otherwise (unless expr is NULL, in which case the function returns NULL). -IPv4-compatible addresses have the form ::ipv4_address. 
- -### example - -``` -mysql> SELECT IS_IPV4_COMPAT(INET6_ATON('::ffff:10.0.5.9')) AS is_result; -+-----------+ -| is_result | -+-----------+ -| 0 | -+-----------+ -1 row in set (0.02 sec) - -mysql> SELECT IS_IPV4_COMPAT(INET6_ATON('::10.0.5.9')) AS is_result; -+-----------+ -| is_result | -+-----------+ -| 1 | -+-----------+ -1 row in set (0.03 sec) -``` - -### keywords - -IS_IPV4_COMPAT, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv4-mapped.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv4-mapped.md deleted file mode 100644 index 9e338eb13539ad..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv4-mapped.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -{ -"title": "IS_IPV4_MAPPED", -"language": "en" -} ---- - - - -## IS_IPV4_MAPPED - - - -IS_IPV4_MAPPED - - - -### description - -#### Syntax - -`VARCHAR IS_IPV4_MAPPED(INET6_ATON(VARCHAR ipv4_addr))` - -This function takes an IPv6 address represented in numeric form as a binary string, as returned by INET6_ATON(). -It returns 1 if the argument is a valid IPv4-mapped IPv6 address, 0 otherwise, unless expr is NULL, in which case the function returns NULL. -IPv4-mapped addresses have the form ::ffff:ipv4_address. 
- -### notice - -`When the source input doesn't have a prefix of '::ffff:', but if it's still a valid ipv4 address, this result will also be 1 for the reason that the INET6_ATON() automatically adds the prefix for it.` - -### example - -``` -mysql> SELECT IS_IPV4_MAPPED(INET6_ATON('::ffff:10.0.5.9')) AS is_result; -+-----------+ -| is_result | -+-----------+ -| 1 | -+-----------+ -1 row in set (0.02 sec) - -mysql> SELECT IS_IPV4_MAPPED(INET6_ATON('::10.0.5.9')) AS is_result; -+-----------+ -| is_result | -+-----------+ -| 0 | -+-----------+ -1 row in set (0.03 sec) -``` - -### keywords - -IS_IPV4_MAPPED, IP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv4-string.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv4-string.md deleted file mode 100644 index 9aca93afc0acab..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv4-string.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -{ -"title": "IS_IPV4_STRING", -"language": "en" -} ---- - - - -## IS_IPV4_STRING - - - -IS_IPV4_STRING - - - -### description - -#### Syntax - -`BOOLEAN IS_IPV4_STRING(STRING ipv4_str)` - -Receive an IPv4 address in the form of a string as a parameter. If it is a correctly formatted and valid IPv4 address, return true; On the contrary, return false. 
- -### notice - -`If the input parameter is NULL, return NULL, indicating invalid input` - -### example - -``` -mysql> select is_ipv4_string(NULL); -+----------------------+ -| is_ipv4_string(NULL) | -+----------------------+ -| NULL | -+----------------------+ - -mysql> CREATE TABLE `test_is_ipv4_string` ( - `id` int, - `ip_v4` string - ) ENGINE=OLAP - DISTRIBUTED BY HASH(`id`) BUCKETS 4 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" - ); - -mysql> insert into test_is_ipv4_string values(0, NULL), (1, '0.0.0.'), (2, ''), (3, '.'), (4, '255.255.255.255'); - -mysql> select id, is_ipv4_string(ip_v4) from test_is_ipv4_string order by id; -+------+-----------------------+ -| id | is_ipv4_string(ip_v4) | -+------+-----------------------+ -| 0 | NULL | -| 1 | 0 | -| 2 | 0 | -| 3 | 0 | -| 4 | 1 | -+------+-----------------------+ -``` - -### keywords - -IS_IPV4_STRING, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv6-string.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv6-string.md deleted file mode 100644 index 6f3731e26181da..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/is-ipv6-string.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -{ -"title": "IS_IPV6_STRING", -"language": "en" -} ---- - - - -## IS_IPV6_STRING - - - -IS_IPV6_STRING - - - -### description - -#### Syntax - -`BOOLEAN IS_IPV6_STRING(STRING ipv6_str)` - -Receive an IPv6 address in the form of a string as a parameter, and return true if it is a properly formatted and valid IPv6 address; On the contrary, return false. 
- -### notice - -`If the input parameter is NULL, return NULL, indicating invalid input` - -### example - -``` -mysql> select is_ipv6_string(NULL); -+----------------------+ -| is_ipv6_string(NULL) | -+----------------------+ -| NULL | -+----------------------+ - -mysql> CREATE TABLE `test_is_ipv6_string` ( - `id` int, - `ip_v6` string - ) ENGINE=OLAP - DISTRIBUTED BY HASH(`id`) BUCKETS 4 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" - ); - -mysql> insert into test_is_ipv6_string values(0, NULL), (1, '::'), (2, ''), (3, '2001:1b70:a1:610::b102:2'), (4, 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffffg'); - -mysql> select id, is_ipv6_string(ip_v6) from test_is_ipv6_string order by id; -+------+-----------------------+ -| id | is_ipv6_string(ip_v6) | -+------+-----------------------+ -| 0 | NULL | -| 1 | 1 | -| 2 | 0 | -| 3 | 1 | -| 4 | 0 | -+------+-----------------------+ -``` - -### keywords - -IS_IPV6_STRING, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv4-or-default.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv4-or-default.md deleted file mode 100644 index d5770906b881ec..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv4-or-default.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -{ -"title": "TO_IPV4_OR_DEFAULT", -"language": "en" -} ---- - - - -## TO_IPV4_OR_DEFAULT - - - -TO_IPV4_OR_DEFAULT - - - -### description - -#### Syntax - -`IPV4 TO_IPV4_OR_DEFAULT(STRING ipv4_str)` - -Same as to_ipv4, but if the IPv4 address has an invalid format, it returns 0.0.0.0 (0 as IPv4). 
- -### notice - -`If input is NULL, return 0.0.0.0 (0 as IPv4).` - -### example - -``` -mysql> select to_ipv4_or_default('.'); -+-------------------------+ -| to_ipv4_or_default('.') | -+-------------------------+ -| 0.0.0.0 | -+-------------------------+ - -mysql> select to_ipv4_or_default(NULL); -+--------------------------+ -| to_ipv4_or_default(NULL) | -+--------------------------+ -| 0.0.0.0 | -+--------------------------+ -``` - -### keywords - -TO_IPV4_OR_DEFAULT, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv4-or-null.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv4-or-null.md deleted file mode 100644 index 35377b233a73fc..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv4-or-null.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -{ -"title": "TO_IPV4_OR_NULL", -"language": "en" -} ---- - - - -## TO_IPV4_OR_NULL - - - -TO_IPV4_OR_NULL - - - -### description - -#### Syntax - -`IPV4 TO_IPV4_OR_NULL(STRING ipv4_str)` - -Same as to_ipv4, but if the IPv4 address has an invalid format, it returns NULL. 
- -### notice - -`If input is NULL, return NULL.` - -### example - -``` -mysql> select to_ipv4_or_null('.'); -+----------------------+ -| to_ipv4_or_null('.') | -+----------------------+ -| NULL | -+----------------------+ - -mysql> select to_ipv4_or_null(NULL); -+-----------------------+ -| to_ipv4_or_null(NULL) | -+-----------------------+ -| NULL | -+-----------------------+ -``` - -### keywords - -TO_IPV4_OR_NULL, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv4.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv4.md deleted file mode 100644 index 24747ae0ed051e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv4.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ -"title": "TO_IPV4", -"language": "en" -} ---- - - - -## TO_IPV4 - - - -TO_IPV4 - - - -### description - -#### Syntax - -`IPV4 TO_IPV4(STRING ipv4_str)` - -This function like ipv4_string_to_num that takes a string form of IPv4 address and returns value of IPv4 type, -which is binary equal to value returned by ipv4_string_to_num. -If the IPv4 address has an invalid format, throw an exception. - -### notice - -`Input cannot be NULL. 
If it is NULL, an exception will be thrown.` - -### example - -``` -mysql> select to_ipv4('255.255.255.255'); -+----------------------------+ -| to_ipv4('255.255.255.255') | -+----------------------------+ -| 255.255.255.255 | -+----------------------------+ -``` - -### keywords - -TO_IPV4, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv6-or-default.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv6-or-default.md deleted file mode 100644 index c0cc118e9bb9e5..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv6-or-default.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -{ -"title": "TO_IPV6_OR_DEFAULT", -"language": "en" -} ---- - - - -## TO_IPV6_OR_DEFAULT - - - -TO_IPV6_OR_DEFAULT - - - -### description - -#### Syntax - -`IPV6 TO_IPV6_OR_DEFAULT(STRING ipv6_str)` - -Same as to_ipv6, but if the IPv6 address has an invalid format, it returns :: (0 as IPv6). - -### notice - -`If input is NULL, return :: (0 as IPv6).` - -### example - -``` -mysql> select to_ipv6_or_default('.'); -+-------------------------+ -| to_ipv6_or_default('.') | -+-------------------------+ -| :: | -+-------------------------+ - -mysql> select to_ipv6_or_default(NULL); -+--------------------------+ -| to_ipv6_or_default(NULL) | -+--------------------------+ -| :: | -+--------------------------+ -``` - -### keywords - -TO_IPV6_OR_DEFAULT, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv6-or-null.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv6-or-null.md deleted file mode 100644 index 863cb383973670..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv6-or-null.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -{ -"title": "TO_IPV6_OR_NULL", -"language": "en" -} ---- - - - -## TO_IPV6_OR_NULL - - - -TO_IPV6_OR_NULL - - - -### description - -#### Syntax - -`IPV6 TO_IPV6_OR_NULL(STRING ipv6_str)` - -Same as to_ipv6, but if the IPv6 address has an invalid format, it returns NULL. 
- -### notice - -`If input is NULL, return NULL.` - -### example - -``` -mysql> select to_ipv6_or_null('.'); -+----------------------+ -| to_ipv6_or_null('.') | -+----------------------+ -| NULL | -+----------------------+ - -mysql> select to_ipv6_or_null(NULL); -+-----------------------+ -| to_ipv6_or_null(NULL) | -+-----------------------+ -| NULL | -+-----------------------+ -``` - -### keywords - -TO_IPV6_OR_NULL, IP diff --git a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv6.md b/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv6.md deleted file mode 100644 index aad7d42b94fd92..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/ip-functions/to-ipv6.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ -"title": "TO_IPV6", -"language": "en" -} ---- - - - -## TO_IPV6 - - - -TO_IPV6 - - - -### description - -#### Syntax - -`IPV6 TO_IPV6(STRING ipv6_str)` - -Convert a string form of IPv6 address to IPv6 type. -If the IPv6 address has an invalid format, throw an exception. -Similar to ipv6_string_to_num function, which converts IPv6 address to binary format. - -### notice - -`Input cannot be NULL. If it is NULL, an exception will be thrown.` - -### example - -``` -mysql> select to_ipv6('::'); -+---------------+ -| to_ipv6('::') | -+---------------+ -| :: | -+---------------+ -``` - -### keywords - -TO_IPV6, IP diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-bigint.md b/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-bigint.md deleted file mode 100644 index 7eda9c4f0e6e35..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-bigint.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -{ - "title": "GET_JSON_BIGINT", - "language": "en" -} ---- - - - -## get_json_bigint -### Description -#### Syntax - -`INT get_json_bigint(VARCHAR json_str, VARCHAR json_path)` - - -Parse and retrieve the big integer content of the specified path in the JSON string. 
-Where json_path must start with the $symbol and use. as the path splitter. If the path contains..., double quotation marks can be used to surround it. -Use [] to denote array subscripts, starting at 0. -The content of path cannot contain ",[and]. -If the json_string format is incorrect, or the json_path format is incorrect, or matches cannot be found, NULL is returned. - -In addition, it is recommended to use the jsonb type and jsonb_extract_XXX function performs the same function. - -### example - -1. Get the value of key as "k1" - -``` -mysql> SELECT get_json_bigint('{"k1":1, "k2":"2"}', "$.k1"); -+-----------------------------------------------+ -| get_json_bigint('{"k1":1, "k2":"2"}', '$.k1') | -+-----------------------------------------------+ -| 1 | -+-----------------------------------------------+ -``` - -2. Get the second element of the array whose key is "my. key" - -``` -mysql> SELECT get_json_bigint('{"k1":"v1", "my.key":[1, 1678708107000, 3]}', '$."my.key"[1]'); -+---------------------------------------------------------------------------------+ -| get_json_bigint('{"k1":"v1", "my.key":[1, 1678708107000, 3]}', '$."my.key"[1]') | -+---------------------------------------------------------------------------------+ -| 1678708107000 | -+---------------------------------------------------------------------------------+ -``` - -3. Get the first element in an array whose secondary path is k1. 
key - > K2 -``` -mysql> SELECT get_json_bigint('{"k1.key":{"k2":[1678708107000, 2]}}', '$."k1.key".k2[0]'); -+-----------------------------------------------------------------------------+ -| get_json_bigint('{"k1.key":{"k2":[1678708107000, 2]}}', '$."k1.key".k2[0]') | -+-----------------------------------------------------------------------------+ -| 1678708107000 | -+-----------------------------------------------------------------------------+ -``` -### keywords -GET_JSON_BIGINT,GET,JSON,BIGINT diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-double.md b/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-double.md deleted file mode 100644 index 95ab94bb4d5acb..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-double.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -{ - "title": "GET_JSON_DOUBLE", - "language": "en" -} ---- - - - -## get_json_double -### description -#### Syntax - -`DOUBLE get_json_double(VARCHAR json_str, VARCHAR json_path)` - - -Parse and get the floating-point content of the specified path in the JSON string. -Where json_path must start with the $symbol and use. as the path splitter. If the path contains..., double quotation marks can be used to surround it. -Use [] to denote array subscripts, starting at 0. -The content of path cannot contain ",[and]. -If the json_string format is incorrect, or the json_path format is incorrect, or matches cannot be found, NULL is returned. - -In addition, it is recommended to use the jsonb type and jsonb_extract_XXX function performs the same function. - -### example - -1. Get the value of key as "k1" - -``` -mysql> SELECT get_json_double('{"k1":1.3, "k2":"2"}', "$.k1"); -+-------------------------------------------------+ -| get_json_double('{"k1":1.3, "k2":"2"}', '$.k1') | -+-------------------------------------------------+ -| 1.3 | -+-------------------------------------------------+ -``` - -2. 
Get the second element of the array whose key is "my. key" - -``` -mysql> SELECT get_json_double('{"k1":"v1", "my.key":[1.1, 2.2, 3.3]}', '$."my.key"[1]'); -+---------------------------------------------------------------------------+ -| get_json_double('{"k1":"v1", "my.key":[1.1, 2.2, 3.3]}', '$."my.key"[1]') | -+---------------------------------------------------------------------------+ -| 2.2 | -+---------------------------------------------------------------------------+ -``` - -3. Get the first element in an array whose secondary path is k1. key - > K2 -``` -mysql> SELECT get_json_double('{"k1.key":{"k2":[1.1, 2.2]}}', '$."k1.key".k2[0]'); -+---------------------------------------------------------------------+ -| get_json_double('{"k1.key":{"k2":[1.1, 2.2]}}', '$."k1.key".k2[0]') | -+---------------------------------------------------------------------+ -| 1.1 | -+---------------------------------------------------------------------+ -``` -### keywords -GET_JSON_DOUBLE,GET,JSON,DOUBLE diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-int.md b/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-int.md deleted file mode 100644 index 069308e3c2912b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-int.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -{ - "title": "GET_JSON_INT", - "language": "en" -} ---- - - - -## get_json_int -### Description -#### Syntax - -`INT get_json_int(VARCHAR json_str, VARCHAR json_path)` - - -Parse and retrieve the integer content of the specified path in the JSON string. -Where json_path must start with the $symbol and use. as the path splitter. If the path contains..., double quotation marks can be used to surround it. -Use [] to denote array subscripts, starting at 0. -The content of path cannot contain ",[and]. -If the json_string format is incorrect, or the json_path format is incorrect, or matches cannot be found, NULL is returned. 
- -In addition, it is recommended to use the jsonb type and jsonb_extract_XXX function performs the same function. - -### example - -1. Get the value of key as "k1" - -``` -mysql> SELECT get_json_int('{"k1":1, "k2":"2"}', "$.k1"); -+--------------------------------------------+ -| get_json_int('{"k1":1, "k2":"2"}', '$.k1') | -+--------------------------------------------+ -| 1 | -+--------------------------------------------+ -``` - -2. Get the second element of the array whose key is "my. key" - -``` -mysql> SELECT get_json_int('{"k1":"v1", "my.key":[1, 2, 3]}', '$."my.key"[1]'); -+------------------------------------------------------------------+ -| get_json_int('{"k1":"v1", "my.key":[1, 2, 3]}', '$."my.key"[1]') | -+------------------------------------------------------------------+ -| 2 | -+------------------------------------------------------------------+ -``` - -3. Get the first element in an array whose secondary path is k1. key - > K2 -``` -mysql> SELECT get_json_int('{"k1.key":{"k2":[1, 2]}}', '$."k1.key".k2[0]'); -+--------------------------------------------------------------+ -| get_json_int('{"k1.key":{"k2":[1, 2]}}', '$."k1.key".k2[0]') | -+--------------------------------------------------------------+ -| 1 | -+--------------------------------------------------------------+ -``` -### keywords -GET_JSON_INT,GET,JSON,INT diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-string.md b/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-string.md deleted file mode 100644 index 16de495afc54b0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/get-json-string.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -{ - "title": "GET_JSON_STRING", - "language": "en" -} ---- - - - -## get_json_string -### description -#### Syntax - -`VARCHAR get_json_string (VARCHAR json str, VARCHAR json path)` - - -Parse and retrieve the string content of the specified path in the JSON string. 
-Where json_path must start with the $symbol and use. as the path splitter. If the path contains..., double quotation marks can be used to surround it. -Use [] to denote array subscripts, starting at 0. -The content of path cannot contain ",[and]. -If the json_string format is incorrect, or the json_path format is incorrect, or matches cannot be found, NULL is returned. - -In addition, it is recommended to use the jsonb type and jsonb_extract_XXX function performs the same function. - -### example - -1. Get the value of key as "k1" - -``` -mysql> SELECT get_json_string('{"k1":"v1", "k2":"v2"}', "$.k1"); -+---------------------------------------------------+ -| get_json_string('{"k1":"v1", "k2":"v2"}', '$.k1') | -+---------------------------------------------------+ -| v1 | -+---------------------------------------------------+ -``` - -2. Get the second element of the array whose key is "my. key" - -``` -mysql> SELECT get_json_string('{"k1":"v1", "my.key":["e1", "e2", "e3"]}', '$."my.key"[1]'); -+------------------------------------------------------------------------------+ -| get_json_string('{"k1":"v1", "my.key":["e1", "e2", "e3"]}', '$."my.key"[1]') | -+------------------------------------------------------------------------------+ -| e2 | -+------------------------------------------------------------------------------+ -``` - -3. Get the first element in an array whose secondary path is k1. key - > K2 -``` -mysql> SELECT get_json_string('{"k1.key":{"k2":["v1", "v2"]}}', '$."k1.key".k2[0]'); -+-----------------------------------------------------------------------+ -| get_json_string('{"k1.key":{"k2":["v1", "v2"]}}', '$."k1.key".k2[0]') | -+-----------------------------------------------------------------------+ -| v1 | -+-----------------------------------------------------------------------+ -``` - -4. 
Get all the values in the array where the key is "k1" -``` -mysql> SELECT get_json_string('[{"k1":"v1"}, {"k2":"v2"}, {"k1":"v3"}, {"k1":"v4"}]', '$.k1'); -+---------------------------------------------------------------------------------+ -| get_json_string('[{"k1":"v1"}, {"k2":"v2"}, {"k1":"v3"}, {"k1":"v4"}]', '$.k1') | -+---------------------------------------------------------------------------------+ -| ["v1","v3","v4"] | -+---------------------------------------------------------------------------------+ -``` -### keywords -GET_JSON_STRING,GET,JSON,STRING diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-array.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-array.md deleted file mode 100644 index 5a038c24cfe008..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-array.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -{ - "title": "JSON_ARRAY", - "language": "en" -} ---- - - - -## json_array -### Description -#### Syntax - -`VARCHAR json_array(VARCHAR,...)` - - -Generate a json array containing the specified values, return empty if no values - -### example - -``` -MySQL> select json_array(); -+--------------+ -| json_array() | -+--------------+ -| [] | -+--------------+ - -MySQL> select json_array(null); -+--------------------+ -| json_array('NULL') | -+--------------------+ -| [NULL] | -+--------------------+ - - -MySQL> SELECT json_array(1, "abc", NULL, TRUE, CURTIME()); -+-----------------------------------------------+ -| json_array(1, 'abc', 'NULL', TRUE, curtime()) | -+-----------------------------------------------+ -| [1, "abc", NULL, TRUE, "10:41:15"] | -+-----------------------------------------------+ - - -MySQL> select json_array("a", null, "c"); -+------------------------------+ -| json_array('a', 'NULL', 'c') | -+------------------------------+ -| ["a", NULL, "c"] | -+------------------------------+ -``` -### keywords -json,array,json_array diff --git 
a/docs/en/docs/sql-manual/sql-functions/json-functions/json-contains.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-contains.md deleted file mode 100644 index 78491b6462b344..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-contains.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -{ -"title": "JSON_CONTAINS", -"language": "en" -} ---- - - - -## json_contains -### description -#### Syntax - -`BOOLEAN json_contains(JSON json_str, JSON candidate)` -`BOOLEAN json_contains(JSON json_str, JSON candidate, VARCHAR json_path)` -`BOOLEAN json_contains(VARCHAR json_str, VARCHAR candidate, VARCHAR json_path)` - - -Indicates by returning 1 or 0 whether a given candidate JSON document is contained at a specific path within the json_str JSON document - -### example - -``` -mysql> SET @j = '{"a": 1, "b": 2, "c": {"d": 4}}'; -mysql> SET @j2 = '1'; -mysql> SELECT JSON_CONTAINS(@j, @j2, '$.a'); -+-------------------------------+ -| JSON_CONTAINS(@j, @j2, '$.a') | -+-------------------------------+ -| 1 | -+-------------------------------+ -mysql> SELECT JSON_CONTAINS(@j, @j2, '$.b'); -+-------------------------------+ -| JSON_CONTAINS(@j, @j2, '$.b') | -+-------------------------------+ -| 0 | -+-------------------------------+ - -mysql> SET @j2 = '{"d": 4}'; -mysql> SELECT JSON_CONTAINS(@j, @j2, '$.a'); -+-------------------------------+ -| JSON_CONTAINS(@j, @j2, '$.a') | -+-------------------------------+ -| 0 | -+-------------------------------+ -mysql> SELECT JSON_CONTAINS(@j, @j2, '$.c'); -+-------------------------------+ -| JSON_CONTAINS(@j, @j2, '$.c') | -+-------------------------------+ -| 1 | -+-------------------------------+ - -mysql> SELECT json_contains('[1, 2, {"x": 3}]', '1'); -+----------------------------------------+ -| json_contains('[1, 2, {"x": 3}]', '1') | -+----------------------------------------+ -| 1 | -+----------------------------------------+ -1 row in set (0.04 sec) -``` -### keywords -json,json_contains diff --git 
a/docs/en/docs/sql-manual/sql-functions/json-functions/json-exists-path.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-exists-path.md deleted file mode 100644 index dcea0736f4e1b3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-exists-path.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ - "title": "JSON_EXISTS_PATH", - "language": "en" -} ---- - - - -## json_exists_path - -### description - -It is used to judge whether the field specified by json_path exists in the JSON data. If it exists, it returns TRUE, and if it does not exist, it returns FALSE - -#### Syntax - -```sql -BOOLEAN json_exists_path(JSON j, VARCHAR json_path) -``` - -### example - -Refer to [json tutorial](../../sql-reference/Data-Types/JSON.md) - -### keywords - -json_exists_path - diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-extract.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-extract.md deleted file mode 100644 index 2e9aff54b0164b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-extract.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -{ - "title": "JSON_EXTRACT", - "language": "en" -} ---- - - - -## json_extract - - - -### description - -#### Syntax - -```sql -`VARCHAR json_extract(VARCHAR json_str, VARCHAR path[, VARCHAR path] ...))` -JSON jsonb_extract(JSON j, VARCHAR json_path) -BOOLEAN json_extract_isnull(JSON j, VARCHAR json_path) -BOOLEAN json_extract_bool(JSON j, VARCHAR json_path) -INT json_extract_int(JSON j, VARCHAR json_path) -BIGINT json_extract_bigint(JSON j, VARCHAR json_path) -LARGEINT json_extract_largeint(JSON j, VARCHAR json_path) -DOUBLE json_extract_double(JSON j, VARCHAR json_path) -STRING json_extract_string(JSON j, VARCHAR json_path) -``` - -json_extract functions extract field specified by json_path from JSON. A series of functions are provided for different datatype. 
-- json_extract with VARCHAR argument, extract and return VARCHAR datatype -- jsonb_extract extract and return JSON datatype -- json_extract_isnull check if the field is json null and return BOOLEAN datatype -- json_extract_bool extract and return BOOLEAN datatype -- json_extract_int extract and return INT datatype -- json_extract_bigint extract and return BIGINT datatype -- json_extract_largeint extract and return LARGEINT datatype -- json_extract_double extract and return DOUBLE datatype -- json_extract_STRING extract and return STRING datatype - -json path syntax: -- '$' for json document root -- '.k1' for element of json object with key 'k1' - - If the key column value contains ".", double quotes are required in json_path, For example: SELECT json_extract('{"k1.a":"abc","k2":300}', '$."k1.a"'); -- '[i]' for element of json array at index i - - Use '$[last]' to get the last element of json_array, and '$[last-1]' to get the penultimate element, and so on. - - -Exception handling is as follows: -- if the field specified by json_path does not exist, return NULL -- if datatype of the field specified by json_path is not the same with type of json_extract_t, return t if it can be cast to t else NULL - - -## json_exists_path and json_type -### description - -#### Syntax - -```sql -BOOLEAN json_exists_path(JSON j, VARCHAR json_path) -STRING json_type(JSON j, VARCHAR json_path) -``` - -There are two extra functions to check field existence and type -- json_exists_path check the existence of the field specified by json_path, return TRUE or FALS -- json_type get the type as follows of the field specified by json_path, return NULL if it does not exist - - object - - array - - null - - bool - - int - - bigint - - largeint - - double - - string - -### example - -refer to [json tutorial](../../sql-reference/Data-Types/JSON.md) for more. 
- -``` -mysql> SELECT json_extract('{"id": 123, "name": "doris"}', '$.id'); -+------------------------------------------------------+ -| json_extract('{"id": 123, "name": "doris"}', '$.id') | -+------------------------------------------------------+ -| 123 | -+------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql> SELECT json_extract('[1, 2, 3]', '$.[1]'); -+------------------------------------+ -| json_extract('[1, 2, 3]', '$.[1]') | -+------------------------------------+ -| 2 | -+------------------------------------+ -1 row in set (0.01 sec) - -mysql> SELECT json_extract('{"k1": "v1", "k2": { "k21": 6.6, "k22": [1, 2] } }', '$.k1', '$.k2.k21', '$.k2.k22', '$.k2.k22[1]'); -+-------------------------------------------------------------------------------------------------------------------+ -| json_extract('{"k1": "v1", "k2": { "k21": 6.6, "k22": [1, 2] } }', '$.k1', '$.k2.k21', '$.k2.k22', '$.k2.k22[1]') | -+-------------------------------------------------------------------------------------------------------------------+ -| ["v1",6.6,[1,2],2] | -+-------------------------------------------------------------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql> SELECT json_extract('{"id": 123, "name": "doris"}', '$.aaa', '$.name'); -+-----------------------------------------------------------------+ -| json_extract('{"id": 123, "name": "doris"}', '$.aaa', '$.name') | -+-----------------------------------------------------------------+ -| [null,"doris"] | -+-----------------------------------------------------------------+ -1 row in set (0.01 sec) -``` - - -### keywords -JSONB, JSON, json_extract, json_extract_isnull, json_extract_bool, json_extract_int, json_extract_bigint, json_extract_largeint,json_extract_double, json_extract_string, json_exists_path, json_type \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-insert.md 
b/docs/en/docs/sql-manual/sql-functions/json-functions/json-insert.md deleted file mode 100644 index 4d6ce999837fd8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-insert.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -{ - "title": "JSON_INSERT", - "language": "en" -} ---- - - - -## json_insert - - - -### Description -#### Syntax - -`VARCHAR json_insert(VARCHAR json_str, VARCHAR path, VARCHAR val[, VARCHAR path, VARCHAR val] ...)` - - -`json_insert` function inserts data in a JSON and returns the result.Returns NULL if `json_str` or `path` is NULL. Otherwise, an error occurs if the `json_str` argument is not a valid JSON or any path argument is not a valid path expression or contains a * wildcard. - -The path-value pairs are evaluated left to right. - -A path-value pair for a nonexisting path in the json adds the value to the json if the path identifies one of these types of values: - -* A member not present in an existing object. The member is added to the object and associated with the new value. - -* A position past the end of an existing array. The array is extended with the new value. If the existing value is not an array, it is autowrapped as an array, then extended with the new value. - -Otherwise, a path-value pair for a nonexisting path in the json is ignored and has no effect. 
- -### example - -``` -MySQL> select json_insert(null, null, null); -+---------------------------------+ -| json_insert(NULL, NULL, 'NULL') | -+---------------------------------+ -| NULL | -+---------------------------------+ - -MySQL> select json_insert('{"k": 1}', "$.k", 2); -+---------------------------------------+ -| json_insert('{\"k\": 1}', '$.k', '2') | -+---------------------------------------+ -| {"k":1} | -+---------------------------------------+ - -MySQL> select json_insert('{"k": 1}', "$.j", 2); -+---------------------------------------+ -| json_insert('{\"k\": 1}', '$.j', '2') | -+---------------------------------------+ -| {"k":1,"j":2} | -+---------------------------------------+ -``` - -### keywords -JSON, json_insert diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-length.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-length.md deleted file mode 100644 index 6ea8f253142546..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-length.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -{ -"title": "JSON_LENGTH", -"language": "en" -} ---- - - - -## json_length -### description -#### Syntax - -`INT json_length(JSON json_str)` -`INT json_length(JSON json_str, VARCHAR json_path)` - -If specified path, the JSON_LENGTH() function returns the length of the data matching the path in the JSON document, otherwise it returns the length of the JSON document. The function calculates the length of the JSON document according to the following rules: - -* The length of a scalar is 1. For example, the length of 1, '"x"', true, false, null is all 1. -* The length of an array is the number of array elements. For example, the length of [1, 2] is 2. -* The length of an object is the number of object members. For example, the length of {"x": 1} is 1. 
- -### example - -``` -mysql> SELECT json_length('{"k1":"v31","k2":300}'); -+--------------------------------------+ -| json_length('{"k1":"v31","k2":300}') | -+--------------------------------------+ -| 2 | -+--------------------------------------+ -1 row in set (0.26 sec) - -mysql> SELECT json_length('"abc"'); -+----------------------+ -| json_length('"abc"') | -+----------------------+ -| 1 | -+----------------------+ -1 row in set (0.17 sec) - -mysql> SELECT json_length('{"x": 1, "y": [1, 2]}', '$.y'); -+---------------------------------------------+ -| json_length('{"x": 1, "y": [1, 2]}', '$.y') | -+---------------------------------------------+ -| 2 | -+---------------------------------------------+ -1 row in set (0.07 sec) -``` -### keywords -json,json_length diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-object.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-object.md deleted file mode 100644 index 33199db20a4aa0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-object.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -{ - "title": "JSON_OBJECT", - "language": "en" -} ---- - - - -## json_object -### Description -#### Syntax - -`VARCHAR json_object(VARCHAR,...)` - - -Generate a json object containing the specified Key-Value, -an exception error is returned when Key is NULL or the number of parameters are odd. 
- -### example - -``` -MySQL> select json_object(); -+---------------+ -| json_object() | -+---------------+ -| {} | -+---------------+ - -MySQL> select json_object('time',curtime()); -+--------------------------------+ -| json_object('time', curtime()) | -+--------------------------------+ -| {"time": "10:49:18"} | -+--------------------------------+ - - -MySQL> SELECT json_object('id', 87, 'name', 'carrot'); -+-----------------------------------------+ -| json_object('id', 87, 'name', 'carrot') | -+-----------------------------------------+ -| {"id": 87, "name": "carrot"} | -+-----------------------------------------+ - - -MySQL> select json_object('username',null); -+---------------------------------+ -| json_object('username', 'NULL') | -+---------------------------------+ -| {"username": NULL} | -+---------------------------------+ -``` -### keywords -json,object,json_object diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-parse.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-parse.md deleted file mode 100644 index 78ac813a6280d4..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-parse.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -{ - "title": "JSON_PARSE", - "language": "en" -} ---- - - - -## json_parse -### description -#### Syntax - -```sql -JSON json_parse(VARCHAR json_str) -JSON json_parse_error_to_null(VARCHAR json_str) -JSON json_parse_error_to_value(VARCHAR json_str, VARCHAR default_json_str) -``` - -json_parse functions parse JSON string to binary format. A series of functions are provided to satisfy different demand for exception handling. -- all return NULL if json_str is NULL -- if json_str is not valid - - json_parse will report error - - json_parse_error_to_null will return NULL - - json_parse_error_to_value will return the value specified by default_json_str - -### example - -1. 
parse valid JSON string - -``` -mysql> SELECT json_parse('{"k1":"v31","k2":300}'); -+--------------------------------------+ -| json_parse('{"k1":"v31","k2":300}') | -+--------------------------------------+ -| {"k1":"v31","k2":300} | -+--------------------------------------+ -1 row in set (0.01 sec) -``` - -2. parse invalid JSON string - -``` -mysql> SELECT json_parse('invalid json'); -ERROR 1105 (HY000): errCode = 2, detailMessage = json parse error: Invalid document: document must be an object or an array for value: invalid json - -mysql> SELECT json_parse_error_to_null('invalid json'); -+-------------------------------------------+ -| json_parse_error_to_null('invalid json') | -+-------------------------------------------+ -| NULL | -+-------------------------------------------+ -1 row in set (0.01 sec) - -mysql> SELECT json_parse_error_to_value('invalid json', '{}'); -+--------------------------------------------------+ -| json_parse_error_to_value('invalid json', '{}') | -+--------------------------------------------------+ -| {} | -+--------------------------------------------------+ -1 row in set (0.00 sec) -``` - -refer to json tutorial for more. - -### keywords -JSONB, JSON, json_parse, json_parse_error_to_null, json_parse_error_to_value \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-quote.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-quote.md deleted file mode 100644 index 16a3f35f1a46c1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-quote.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -{ - "title": "JSON_QUOTE", - "language": "en" -} ---- - - - -## json_quote -### Description -#### Syntax - -`VARCHAR json_quote(VARCHAR)` - - -Enclose json_value in double quotes ("), escape special characters contained. 
- -### example - -``` -MySQL> SELECT json_quote('null'), json_quote('"null"'); -+--------------------+----------------------+ -| json_quote('null') | json_quote('"null"') | -+--------------------+----------------------+ -| "null" | "\"null\"" | -+--------------------+----------------------+ - - -MySQL> SELECT json_quote('[1, 2, 3]'); -+-------------------------+ -| json_quote('[1, 2, 3]') | -+-------------------------+ -| "[1, 2, 3]" | -+-------------------------+ - - -MySQL> SELECT json_quote(null); -+------------------+ -| json_quote(null) | -+------------------+ -| NULL | -+------------------+ - -MySQL> select json_quote("\n\b\r\t"); -+------------------------+ -| json_quote('\n\b\r\t') | -+------------------------+ -| "\n\b\r\t" | -+------------------------+ -``` -### keywords -json,quote,json_quote diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-replace.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-replace.md deleted file mode 100644 index 7fad215be565ac..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-replace.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -{ - "title": "JSON_REPLACE", - "language": "en" -} ---- - - - -## json_replace - - - -### Description -#### Syntax - -`VARCHAR json_replace(VARCHAR json_str, VARCHAR path, VARCHAR val[, VARCHAR path, VARCHAR val] ...)` - - -`json_replace` function updates data in a JSON and returns the result.Returns NULL if `json_str` or `path` is NULL. Otherwise, an error occurs if the `json_str` argument is not a valid JSON or any path argument is not a valid path expression or contains a * wildcard. - -The path-value pairs are evaluated left to right. - -A path-value pair for an existing path in the json overwrites the existing json value with the new value. - -Otherwise, a path-value pair for a nonexisting path in the json is ignored and has no effect. 
- -### example - -``` -MySQL> select json_replace(null, null, null); -+----------------------------------+ -| json_replace(NULL, NULL, 'NULL') | -+----------------------------------+ -| NULL | -+----------------------------------+ - -MySQL> select json_replace('{"k": 1}', "$.k", 2); -+----------------------------------------+ -| json_replace('{\"k\": 1}', '$.k', '2') | -+----------------------------------------+ -| {"k":2} | -+----------------------------------------+ - -MySQL> select json_replace('{"k": 1}', "$.j", 2); -+----------------------------------------+ -| json_replace('{\"k\": 1}', '$.j', '2') | -+----------------------------------------+ -| {"k":1} | -+----------------------------------------+ -``` - -### keywords -JSON, json_replace diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-set.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-set.md deleted file mode 100644 index 42b47c58a8d560..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-set.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -{ - "title": "JSON_SET", - "language": "en" -} ---- - - - -## json_set - - - -### Description -#### Syntax - -`VARCHAR json_set(VARCHAR json_str, VARCHAR path, VARCHAR val[, VARCHAR path, VARCHAR val] ...)` - - -`json_set` function inserts or updates data in a JSON and returns the result.Returns NULL if `json_str` or `path` is NULL. Otherwise, an error occurs if the `json_str` argument is not a valid JSON or any path argument is not a valid path expression or contains a * wildcard. - -The path-value pairs are evaluated left to right. - -A path-value pair for an existing path in the json overwrites the existing json value with the new value. A path-value pair for a nonexisting path in the json adds the value to the json if the path identifies one of these types of values: - -* A member not present in an existing object. The member is added to the object and associated with the new value. 
- -* A position past the end of an existing array. The array is extended with the new value. If the existing value is not an array, it is autowrapped as an array, then extended with the new value. - -Otherwise, a path-value pair for a nonexisting path in the json is ignored and has no effect. - -### example - -``` -MySQL> select json_set(null, null, null); -+------------------------------+ -| json_set(NULL, NULL, 'NULL') | -+------------------------------+ -| NULL | -+------------------------------+ - -MySQL> select json_set('{"k": 1}', "$.k", 2); -+------------------------------------+ -| json_set('{\"k\": 1}', '$.k', '2') | -+------------------------------------+ -| {"k":2} | -+------------------------------------+ - -MySQL> select json_set('{"k": 1}', "$.j", 2); -+------------------------------------+ -| json_set('{\"k\": 1}', '$.j', '2') | -+------------------------------------+ -| {"k":1,"j":2} | -+------------------------------------+ -``` - -### keywords -JSON, json_set diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-type.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-type.md deleted file mode 100644 index eaa92930945ca0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-type.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "JSON_TYPE", - "language": "en" -} ---- - - - -## json_type - -### description - -It is used to determine the type of the field specified by json_path in JSON data. If the field does not exist, return NULL. 
If it exists, return one of the following types - -- object -- array -- null -- bool -- int -- bigint -- largeint -- double -- string - -#### Syntax - -```sql -STRING json_type(JSON j, VARCHAR json_path) -``` - -### example - -Refer to [json tutorial](../../sql-reference/Data-Types/JSON.md) - -### keywords - -json_type - diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-unquote.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-unquote.md deleted file mode 100644 index 4a059b68d798d8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-unquote.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -{ - "title": "JSON_UNQUOTE", - "language": "en" -} ---- - - - -## json_unquote -### Description -#### Syntax - -`VARCHAR json_unquote(VARCHAR)` - -This function unquotes a JSON value and returns the result as a utf8mb4 string. If the argument is NULL, it will return NULL. - -Escape sequences within a string as shown in the following table will be recognized. Backslashes will be ignored for all other escape sequences. 
- -| Escape Sequence | Character Represented by Sequence | -|-----------------|------------------------------------| -| \" | A double quote (") character | -| \b | A backspace character | -| \f | A formfeed character | -| \n | A newline (linefeed) character | -| \r | A carriage return character | -| \t | A tab character | -| \\ | A backslash (\) character | -| \uxxxx | UTF-8 bytes for Unicode value XXXX | - - - -### example - -``` -mysql> SELECT json_unquote('"doris"'); -+-------------------------+ -| json_unquote('"doris"') | -+-------------------------+ -| doris | -+-------------------------+ - -mysql> SELECT json_unquote('[1, 2, 3]'); -+---------------------------+ -| json_unquote('[1, 2, 3]') | -+---------------------------+ -| [1, 2, 3] | -+---------------------------+ - - -mysql> SELECT json_unquote(null); -+--------------------+ -| json_unquote(NULL) | -+--------------------+ -| NULL | -+--------------------+ - -mysql> SELECT json_unquote('"\\ttest"'); -+--------------------------+ -| json_unquote('"\ttest"') | -+--------------------------+ -| test | -+--------------------------+ -``` -### keywords -json,unquote,json_unquote diff --git a/docs/en/docs/sql-manual/sql-functions/json-functions/json-valid.md b/docs/en/docs/sql-manual/sql-functions/json-functions/json-valid.md deleted file mode 100644 index 24bbd95d533615..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/json-functions/json-valid.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -{ - "title": "JSON_VALID", - "language": "en" -} ---- - - - -## json_valid -### description - -json_valid functions returns 0 or 1 to indicate whether a value is valid JSON and Returns NULL if the argument is NULL. - -#### Syntax - -`JSONB json_valid(VARCHAR json_str)` - -### example - -1. 
parse valid JSON string - -``` -MySQL > SELECT json_valid('{"k1":"v31","k2":300}'); -+-------------------------------------+ -| json_valid('{"k1":"v31","k2":300}') | -+-------------------------------------+ -| 1 | -+-------------------------------------+ -1 row in set (0.02 sec) -``` - -2. parse invalid JSON string - -``` -MySQL > SELECT json_valid('invalid json'); -+----------------------------+ -| json_valid('invalid json') | -+----------------------------+ -| 0 | -+----------------------------+ -1 row in set (0.02 sec) -``` - -3. parse NULL - -``` -MySQL > select json_valid(NULL); -+------------------+ -| json_valid(NULL) | -+------------------+ -| NULL | -+------------------+ -1 row in set (0.02 sec) -``` - -### keywords -JSON, VALID, JSON_VALID diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/abs.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/abs.md deleted file mode 100644 index 865b58a95c1515..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/abs.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -{ - "title": "ABS", - "language": "en" -} ---- - - - -## abs - -### description -#### Syntax - -```sql -SMALLINT abs(TINYINT x) -INT abs(SMALLINT x) -BIGINT abs(INT x) -LARGEINT abs(BIGINT x) -LARGEINT abs(LARGEINT x) -DOUBLE abs(DOUBLE x) -FLOAT abs(FLOAT x) -DECIMAL abs(DECIMAL x)` -``` - -Returns the absolute value of `x`. 
- -### example - -``` -mysql> select abs(-2); -+---------+ -| abs(-2) | -+---------+ -| 2 | -+---------+ -mysql> select abs(3.254655654); -+------------------+ -| abs(3.254655654) | -+------------------+ -| 3.254655654 | -+------------------+ -mysql> select abs(-3254654236547654354654767); -+---------------------------------+ -| abs(-3254654236547654354654767) | -+---------------------------------+ -| 3254654236547654354654767 | -+---------------------------------+ -``` - -### keywords - ABS diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/acos.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/acos.md deleted file mode 100644 index 0ab2f9cb81f0c1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/acos.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "ACOS", - "language": "en" -} ---- - - - -## acos - -### description -#### Syntax - -`DOUBLE acos(DOUBLE x)` -Returns the arc cosine of `x`, or `nan` if `x` is not in the range `-1` to `1`. - -### example - -``` -mysql> select acos(1); -+-----------+ -| acos(1.0) | -+-----------+ -| 0 | -+-----------+ -mysql> select acos(0); -+--------------------+ -| acos(0.0) | -+--------------------+ -| 1.5707963267948966 | -+--------------------+ -mysql> select acos(-2); -+------------+ -| acos(-2.0) | -+------------+ -| nan | -+------------+ -``` - -### keywords - ACOS diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/asin.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/asin.md deleted file mode 100644 index 84f466c2e7d7c2..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/asin.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "ASIN", - "language": "en" -} ---- - - - -## asin - -### description -#### Syntax - -`DOUBLE asin(DOUBLE x)` -Returns the arc sine of `x`, or `nan` if `x` is not in the range `-1` to `1`. 
- -### example - -``` -mysql> select asin(0.5); -+---------------------+ -| asin(0.5) | -+---------------------+ -| 0.52359877559829893 | -+---------------------+ -mysql> select asin(2); -+-----------+ -| asin(2.0) | -+-----------+ -| nan | -+-----------+ -``` - -### keywords - ASIN diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/atan.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/atan.md deleted file mode 100644 index c8605b1f559148..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/atan.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "ATAN", - "language": "en" -} ---- - - - -## atan - -### description -#### Syntax - -`DOUBLE atan(DOUBLE x)` -Returns the arctangent of `x`, where `x` is in radians. - -### example - -``` -mysql> select atan(0); -+-----------+ -| atan(0.0) | -+-----------+ -| 0 | -+-----------+ -mysql> select atan(2); -+--------------------+ -| atan(2.0) | -+--------------------+ -| 1.1071487177940904 | -+--------------------+ -``` - -### keywords - ATAN diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/atan2.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/atan2.md deleted file mode 100644 index e842b3ad4b9bdd..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/atan2.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -{ - "title": "ATAN2", - "language": "en" -} ---- - - - -## atan2 - -### description -#### Syntax - -`DOUBLE atan2(DOUBLE y, DOUBLE x)` -Returns the arc tangent of 'y' / 'x'. 
- -### example - -``` -mysql> select atan2(0.1, 0.2); -+---------------------+ -| atan2(0.1, 0.2) | -+---------------------+ -| 0.46364760900080609 | -+---------------------+ - -mysql> select atan2(1.0, 1.0); -+---------------------+ -| atan2(1.0, 1.0) | -+---------------------+ -| 0.78539816339744828 | -+---------------------+ -``` - -### keywords - ATAN2 diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/bin.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/bin.md deleted file mode 100644 index a24aea386d65ec..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/bin.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "BIN", - "language": "en" -} ---- - - - -## bin - -### description -#### Syntax - -`STRING bin(BIGINT x)` -Convert the decimal number `x` to binary. - -### example - -``` -mysql> select bin(0); -+--------+ -| bin(0) | -+--------+ -| 0 | -+--------+ -mysql> select bin(10); -+---------+ -| bin(10) | -+---------+ -| 1010 | -+---------+ -mysql> select bin(-3); -+------------------------------------------------------------------+ -| bin(-3) | -+------------------------------------------------------------------+ -| 1111111111111111111111111111111111111111111111111111111111111101 | -+------------------------------------------------------------------+ -``` - -### keywords - BIN diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/cbrt.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/cbrt.md deleted file mode 100644 index 96c4edb1040216..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/cbrt.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "CBRT", - "language": "en" -} ---- - - - -## cbrt - -### description -#### Syntax - -`DOUBLE cbrt(DOUBLE x)` -Returns the cube root of x. 
- -### example - -``` -mysql> select cbrt(8); -+-----------+ -| cbrt(8.0) | -+-----------+ -| 2 | -+-----------+ -mysql> select cbrt(2.0); -+--------------------+ -| cbrt(2.0) | -+--------------------+ -| 1.2599210498948734 | -+--------------------+ -mysql> select cbrt(-1000.0); -+---------------+ -| cbrt(-1000.0) | -+---------------+ -| -10 | -+---------------+ -``` - -### keywords - CBRT diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/ceil.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/ceil.md deleted file mode 100644 index 8efaeabb8dbdde..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/ceil.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "CEIL", - "language": "en" -} ---- - - - -## ceil - -### description -#### Syntax - -`BIGINT ceil(DOUBLE x)` -Returns the smallest integer value greater than or equal to `x`. - -### example - -``` -mysql> select ceil(1); -+-----------+ -| ceil(1.0) | -+-----------+ -| 1 | -+-----------+ -mysql> select ceil(2.4); -+-----------+ -| ceil(2.4) | -+-----------+ -| 3 | -+-----------+ -mysql> select ceil(-10.3); -+-------------+ -| ceil(-10.3) | -+-------------+ -| -10 | -+-------------+ -``` - -### keywords - CEIL diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/conv.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/conv.md deleted file mode 100644 index d585beaa2b36bc..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/conv.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -{ - "title": "CONV", - "language": "en" -} ---- - - - -## conv - -### description -#### Syntax - -```sql -VARCHAR CONV(VARCHAR input, TINYINT from_base, TINYINT to_base) -VARCHAR CONV(BIGINT input, TINYINT from_base, TINYINT to_base) -``` -Convert the input number to the target base. The input base range should be within `[2,36]`. 
- -### example - -``` -MySQL [test]> SELECT CONV(15,10,2); -+-----------------+ -| conv(15, 10, 2) | -+-----------------+ -| 1111 | -+-----------------+ - -MySQL [test]> SELECT CONV('ff',16,10); -+--------------------+ -| conv('ff', 16, 10) | -+--------------------+ -| 255 | -+--------------------+ - -MySQL [test]> SELECT CONV(230,10,16); -+-------------------+ -| conv(230, 10, 16) | -+-------------------+ -| E6 | -+-------------------+ -``` - -### keywords - CONV diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/cos.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/cos.md deleted file mode 100644 index 44ea2564fb6a90..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/cos.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "COS", - "language": "en" -} ---- - - - -## cos - -### description -#### Syntax - -`DOUBLE cos(DOUBLE x)` -Returns the cosine of `x`, where `x` is in radians - -### example - -``` -mysql> select cos(1); -+---------------------+ -| cos(1.0) | -+---------------------+ -| 0.54030230586813977 | -+---------------------+ -mysql> select cos(0); -+----------+ -| cos(0.0) | -+----------+ -| 1 | -+----------+ -mysql> select cos(Pi()); -+-----------+ -| cos(pi()) | -+-----------+ -| -1 | -+-----------+ -``` - -### keywords - COS diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/cosh.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/cosh.md deleted file mode 100644 index 8309649aed3d0e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/cosh.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -{ - "title": "COSH", - "language": "en" -} ---- - - - -## cosh - -### description -#### Syntax - -`DOUBLE cosh(DOUBLE x)` -Returns the hyperbolic cosine of `x`. 
- -### example - -``` -mysql> select cosh(0); -+---------+ -| cosh(0) | -+---------+ -| 1 | -+---------+ - -mysql> select cosh(1); -+---------------------+ -| cosh(1) | -+---------------------+ -| 1.5430806348152437 | -+---------------------+ -``` - -### keywords - COSH diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/degrees.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/degrees.md deleted file mode 100644 index d646e503ccb55d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/degrees.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "DEGREES", - "language": "en" -} ---- - - - -## degrees - -### description -#### Syntax - -`DOUBLE degrees(DOUBLE x)` -Returns the degree of `x`, converted from radians to degrees. - -### example - -``` -mysql> select degrees(0); -+--------------+ -| degrees(0.0) | -+--------------+ -| 0 | -+--------------+ -mysql> select degrees(2); -+--------------------+ -| degrees(2.0) | -+--------------------+ -| 114.59155902616465 | -+--------------------+ -mysql> select degrees(Pi()); -+---------------+ -| degrees(pi()) | -+---------------+ -| 180 | -+---------------+ -``` - -### keywords - DEGREES diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/e.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/e.md deleted file mode 100644 index e7b924201ce2f5..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/e.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -{ - "title": "E", - "language": "en" -} ---- - - - -## e - -### description -#### Syntax - -`DOUBLE e()` -Returns the constant `e` value. 
- -### example - -``` -mysql> select e(); -+--------------------+ -| e() | -+--------------------+ -| 2.7182818284590451 | -+--------------------+ -``` - -### keywords - E diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/exp.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/exp.md deleted file mode 100644 index bf8e83e3410d38..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/exp.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "EXP", - "language": "en" -} ---- - - - -## exp - -### description -#### Syntax - -`DOUBLE exp(DOUBLE x)` -Returns `x` raised to the base `e`. - -### example - -``` -mysql> select exp(2); -+------------------+ -| exp(2.0) | -+------------------+ -| 7.38905609893065 | -+------------------+ -mysql> select exp(3.4); -+--------------------+ -| exp(3.4) | -+--------------------+ -| 29.964100047397011 | -+--------------------+ -``` - -### keywords - EXP diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/floor.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/floor.md deleted file mode 100644 index 23e21d1bcaca0a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/floor.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "FLOOR", - "language": "en" -} ---- - - - -## floor - -### description -#### Syntax - -`BIGINT floor(DOUBLE x)` -Returns the largest integer value less than or equal to `x`. 
- -### example - -``` -mysql> select floor(1); -+------------+ -| floor(1.0) | -+------------+ -| 1 | -+------------+ -mysql> select floor(2.4); -+------------+ -| floor(2.4) | -+------------+ -| 2 | -+------------+ -mysql> select floor(-10.3); -+--------------+ -| floor(-10.3) | -+--------------+ -| -11 | -+--------------+ -``` - -### keywords - FLOOR diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/greatest.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/greatest.md deleted file mode 100644 index 279e7d80002bf1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/greatest.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "GREATEST", - "language": "en" -} ---- - - - -## greatest - -### description -#### Syntax - -`greatest(col_a, col_b, …, col_n)` - -`column` supports the following types: `TINYINT` `SMALLINT` `INT` `BIGINT` `LARGEINT` `FLOAT` `DOUBLE` `STRING` `DATETIME` `DECIMAL` - -Compares the size of `n columns` and returns the largest among them. If there is `NULL` in `column`, it returns `NULL`. 
- -### example - -``` -mysql> select greatest(-1, 0, 5, 8); -+-----------------------+ -| greatest(-1, 0, 5, 8) | -+-----------------------+ -| 8 | -+-----------------------+ -mysql> select greatest(-1, 0, 5, NULL); -+--------------------------+ -| greatest(-1, 0, 5, NULL) | -+--------------------------+ -| NULL | -+--------------------------+ -mysql> select greatest(6.3, 4.29, 7.6876); -+-----------------------------+ -| greatest(6.3, 4.29, 7.6876) | -+-----------------------------+ -| 7.6876 | -+-----------------------------+ -mysql> select greatest("2022-02-26 20:02:11","2020-01-23 20:02:11","2020-06-22 20:02:11"); -+-------------------------------------------------------------------------------+ -| greatest('2022-02-26 20:02:11', '2020-01-23 20:02:11', '2020-06-22 20:02:11') | -+-------------------------------------------------------------------------------+ -| 2022-02-26 20:02:11 | -+-------------------------------------------------------------------------------+ -``` - -### keywords - GREATEST diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/least.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/least.md deleted file mode 100644 index 7835d259c2b2cf..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/least.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "LEAST", - "language": "en" -} ---- - - - -## least - -### description -#### Syntax - -`least(col_a, col_b, …, col_n)` - -`column` supports the following types: `TINYINT` `SMALLINT` `INT` `BIGINT` `LARGEINT` `FLOAT` `DOUBLE` `STRING` `DATETIME` `DECIMAL` - -Compare the size of `n columns` and return the smallest among them. If there is `NULL` in `column`, return `NULL`. 
- -### example - -``` -mysql> select least(-1, 0, 5, 8); -+--------------------+ -| least(-1, 0, 5, 8) | -+--------------------+ -| -1 | -+--------------------+ -mysql> select least(-1, 0, 5, NULL); -+-----------------------+ -| least(-1, 0, 5, NULL) | -+-----------------------+ -| NULL | -+-----------------------+ -mysql> select least(6.3, 4.29, 7.6876); -+--------------------------+ -| least(6.3, 4.29, 7.6876) | -+--------------------------+ -| 4.29 | -+--------------------------+ -mysql> select least("2022-02-26 20:02:11","2020-01-23 20:02:11","2020-06-22 20:02:11"); -+----------------------------------------------------------------------------+ -| least('2022-02-26 20:02:11', '2020-01-23 20:02:11', '2020-06-22 20:02:11') | -+----------------------------------------------------------------------------+ -| 2020-01-23 20:02:11 | -+----------------------------------------------------------------------------+ -``` - -### keywords - LEAST diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/ln.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/ln.md deleted file mode 100644 index 7f56be72fd9a64..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/ln.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "LN", - "language": "en" -} ---- - - - -## ln - -### description -#### Syntax - -`DOUBLE ln(DOUBLE x)` -Returns the natural logarithm of `x` to base `e`. 
- -### example - -``` -mysql> select ln(1); -+---------+ -| ln(1.0) | -+---------+ -| 0 | -+---------+ -mysql> select ln(e()); -+---------+ -| ln(e()) | -+---------+ -| 1 | -+---------+ -mysql> select ln(10); -+--------------------+ -| ln(10.0) | -+--------------------+ -| 2.3025850929940459 | -+--------------------+ -``` - -### keywords - LN diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/log.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/log.md deleted file mode 100644 index 9793bbbed9528b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/log.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "LOG", - "language": "en" -} ---- - - - -## log - -### description -#### Syntax - -`DOUBLE log(DOUBLE b, DOUBLE x)` -Returns the logarithm of `x` based on base `b`. - -### example - -``` -mysql> select log(5,1); -+---------------+ -| log(5.0, 1.0) | -+---------------+ -| 0 | -+---------------+ -mysql> select log(3,20); -+--------------------+ -| log(3.0, 20.0) | -+--------------------+ -| 2.7268330278608417 | -+--------------------+ -mysql> select log(2,65536); -+-------------------+ -| log(2.0, 65536.0) | -+-------------------+ -| 16 | -+-------------------+ -``` - -### keywords - LOG diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/log10.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/log10.md deleted file mode 100644 index b899651d277f98..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/log10.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "LOG10", - "language": "en" -} ---- - - - -## log10 - -### description -#### Syntax - -`DOUBLE log10(DOUBLE x)` -Returns the natural logarithm of `x` to base `10`. 
- -### example - -``` -mysql> select log10(1); -+------------+ -| log10(1.0) | -+------------+ -| 0 | -+------------+ -mysql> select log10(10); -+-------------+ -| log10(10.0) | -+-------------+ -| 1 | -+-------------+ -mysql> select log10(16); -+--------------------+ -| log10(16.0) | -+--------------------+ -| 1.2041199826559248 | -+--------------------+ -``` - -### keywords - LOG10 diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/log2.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/log2.md deleted file mode 100644 index 8a213c2acb7921..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/log2.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "LOG2", - "language": "en" -} ---- - - - -## log2 - -### description -#### Syntax - -`DOUBLE log2(DOUBLE x)` -Returns the natural logarithm of `x` to base `2`. - -### example - -``` -mysql> select log2(1); -+-----------+ -| log2(1.0) | -+-----------+ -| 0 | -+-----------+ -mysql> select log2(2); -+-----------+ -| log2(2.0) | -+-----------+ -| 1 | -+-----------+ -mysql> select log2(10); -+--------------------+ -| log2(10.0) | -+--------------------+ -| 3.3219280948873622 | -+--------------------+ -``` - -### keywords - LOG2 diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/mod.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/mod.md deleted file mode 100644 index bdbae122aa56cf..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/mod.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "MOD", - "language": "en" -} ---- - - - -## mod - -### description -#### Syntax - -`mod(col_a, col_b)` - -`column` support type:`TINYINT` `SMALLINT` `INT` `BIGINT` `LARGEINT` `FLOAT` `DOUBLE` `DECIMAL` - -Find the remainder of a/b. For floating-point types, use the fmod function. 
- -### example - -``` -mysql> select mod(10, 3); -+------------+ -| mod(10, 3) | -+------------+ -| 1 | -+------------+ - -mysql> select fmod(10.1, 3.2); -+-----------------+ -| fmod(10.1, 3.2) | -+-----------------+ -| 0.50000024 | -+-----------------+ -``` - -### keywords - MOD,FMOD diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/negative.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/negative.md deleted file mode 100644 index b18757fe23dfad..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/negative.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "NEGATIVE", - "language": "en" -} ---- - - - -## negative - -### description -#### Syntax - -```sql -BIGINT negative(BIGINT x) -DOUBLE negative(DOUBLE x) -DECIMAL negative(DECIMAL x) -``` -Return `-x`. - -### example - -``` -mysql> SELECT negative(-10); -+---------------+ -| negative(-10) | -+---------------+ -| 10 | -+---------------+ -mysql> SELECT negative(12); -+--------------+ -| negative(12) | -+--------------+ -| -12 | -+--------------+ -``` - -### keywords - NEGATIVE diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/pi.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/pi.md deleted file mode 100644 index eff9d3a3795792..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/pi.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -{ - "title": "PI", - "language": "en" -} ---- - - - -## Pi - -### description -#### Syntax - -`DOUBLE Pi()` -Returns the constant `Pi` value. 
- -### example - -``` -mysql> select Pi(); -+--------------------+ -| pi() | -+--------------------+ -| 3.1415926535897931 | -+--------------------+ -``` - -### keywords - PI diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/pmod.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/pmod.md deleted file mode 100644 index dba803880ccb29..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/pmod.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "PMOD", - "language": "en" -} ---- - - - -## pmod - -### description -#### Syntax - -```sql -BIGINT PMOD(BIGINT x, BIGINT y) -DOUBLE PMOD(DOUBLE x, DOUBLE y) -``` -Returns the positive result of x mod y in the residue systems. -Formally, return `(x%y+y)%y`. - -### example - -``` -MySQL [test]> SELECT PMOD(13,5); -+-------------+ -| pmod(13, 5) | -+-------------+ -| 3 | -+-------------+ - -MySQL [test]> SELECT PMOD(-13,5); -+-------------+ -| pmod(-13, 5) | -+-------------+ -| 2 | -+-------------+ -``` - -### keywords - PMOD diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/positive.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/positive.md deleted file mode 100644 index 952e1a9101f2fd..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/positive.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "POSITIVE", - "language": "en" -} ---- - - - -## positive - -### description -#### Syntax - -```sql -BIGINT positive(BIGINT x) -DOUBLE positive(DOUBLE x) -DECIMAL positive(DECIMAL x) -``` -Return `x`. 
- -### example - -``` -mysql> SELECT positive(-10); -+---------------+ -| positive(-10) | -+---------------+ -| -10 | -+---------------+ -mysql> SELECT positive(12); -+--------------+ -| positive(12) | -+--------------+ -| 12 | -+--------------+ -``` - -### keywords - POSITIVE diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/pow.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/pow.md deleted file mode 100644 index 670c2d2de4c1f5..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/pow.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "POW", - "language": "en" -} ---- - - - -## pow - -### description -#### Syntax - -`DOUBLE pow(DOUBLE a, DOUBLE b)` -Returns `a` raised to the `b` power. - -### example - -``` -mysql> select pow(2,0); -+---------------+ -| pow(2.0, 0.0) | -+---------------+ -| 1 | -+---------------+ -mysql> select pow(2,3); -+---------------+ -| pow(2.0, 3.0) | -+---------------+ -| 8 | -+---------------+ -mysql> select pow(3,2.4); -+--------------------+ -| pow(3.0, 2.4) | -+--------------------+ -| 13.966610165238235 | -+--------------------+ -``` - -### keywords - POW diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/radians.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/radians.md deleted file mode 100644 index 5fe582bd9709ed..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/radians.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "RADIANS", - "language": "en" -} ---- - - - -## radians - -### description -#### Syntax - -`DOUBLE radians(DOUBLE x)` -Returns the value of `x` in radians, converted from degrees to radians. 
- -### example - -``` -mysql> select radians(0); -+--------------+ -| radians(0.0) | -+--------------+ -| 0 | -+--------------+ -mysql> select radians(30); -+---------------------+ -| radians(30.0) | -+---------------------+ -| 0.52359877559829882 | -+---------------------+ -mysql> select radians(90); -+--------------------+ -| radians(90.0) | -+--------------------+ -| 1.5707963267948966 | -+--------------------+ -``` - -### keywords - RADIANS diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/random.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/random.md deleted file mode 100644 index 53e3ba4158f986..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/random.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -{ - "title": "RANDOM", - "language": "en" -} ---- - - - -## random - -### description -#### Syntax - -`DOUBLE random()` -Returns a random number between 0 and 1. - -`DOUBLE random(DOUBLE seed)` -Returns a random number between 0 and 1, seeded with `seed`. - -`BIGINT random(BIGINT a, BIGINT b)` -Returns a random number between a and b. a must be less than b. - -Alias: `rand`. 
- -### example - -```sql -mysql> select random(); -+---------------------+ -| random() | -+---------------------+ -| 0.35446706030596947 | -+---------------------+ - -mysql> select rand(1.2); -+---------------------+ -| rand(1) | -+---------------------+ -| 0.13387664401253274 | -+---------------------+ -1 row in set (0.13 sec) - -mysql> select rand(1.2); -+---------------------+ -| rand(1) | -+---------------------+ -| 0.13387664401253274 | -+---------------------+ -1 row in set (0.11 sec) - -mysql> select rand(-20, -10); -+------------------+ -| random(-20, -10) | -+------------------+ -| -13 | -+------------------+ -1 row in set (0.10 sec) -``` - -### keywords - RANDOM, RAND diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/round-bankers.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/round-bankers.md deleted file mode 100644 index 3115cbf9be5131..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/round-bankers.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -{ - "title": "ROUND_BANKERS", - "language": "en" -} ---- - - - -## round_bankers - -### description -#### Syntax - -`T round_bankers(T x[, d])` -Rounds the argument `x` to `d` specified decimal places. `d` defaults to 0 if not specified. If d is negative, the left d digits of the decimal point are 0. If x or d is null, null is returned. - -+ If the rounding number is halfway between two numbers, the function uses banker’s rounding. -+ In other cases, the function rounds numbers to the nearest integer. 
- - - -### example - -``` -mysql> select round_bankers(0.4); -+--------------------+ -| round_bankers(0.4) | -+--------------------+ -| 0 | -+--------------------+ -mysql> select round_bankers(-3.5); -+---------------------+ -| round_bankers(-3.5) | -+---------------------+ -| -4 | -+---------------------+ -mysql> select round_bankers(-3.4); -+---------------------+ -| round_bankers(-3.4) | -+---------------------+ -| -3 | -+---------------------+ -mysql> select round_bankers(10.755, 2); -+--------------------------+ -| round_bankers(10.755, 2) | -+--------------------------+ -| 10.76 | -+--------------------------+ -mysql> select round_bankers(1667.2725, 2); -+-----------------------------+ -| round_bankers(1667.2725, 2) | -+-----------------------------+ -| 1667.27 | -+-----------------------------+ -mysql> select round_bankers(1667.2725, -2); -+------------------------------+ -| round_bankers(1667.2725, -2) | -+------------------------------+ -| 1700 | -+------------------------------+ -``` - -### keywords - round_bankers diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/round.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/round.md deleted file mode 100644 index f34519acb602e4..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/round.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -{ - "title": "ROUND", - "language": "en" -} ---- - - - -## round - -### description -#### Syntax - -`T round(T x[, d])` -Rounds the argument `x` to `d` decimal places. `d` defaults to 0 if not specified. If d is negative, the left d digits of the decimal point are 0. If x or d is null, null is returned. -2.5 will round up to 3. If you want to round down to 2, please use the round_bankers function. 
- -### example - -``` -mysql> select round(2.4); -+------------+ -| round(2.4) | -+------------+ -| 2 | -+------------+ -mysql> select round(2.5); -+------------+ -| round(2.5) | -+------------+ -| 3 | -+------------+ -mysql> select round(-3.4); -+-------------+ -| round(-3.4) | -+-------------+ -| -3 | -+-------------+ -mysql> select round(-3.5); -+-------------+ -| round(-3.5) | -+-------------+ -| -4 | -+-------------+ -mysql> select round(1667.2725, 2); -+---------------------+ -| round(1667.2725, 2) | -+---------------------+ -| 1667.27 | -+---------------------+ -mysql> select round(1667.2725, -2); -+----------------------+ -| round(1667.2725, -2) | -+----------------------+ -| 1700 | -+----------------------+ -``` - -### keywords - ROUND diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/running-difference.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/running-difference.md deleted file mode 100644 index 3800d024cd78cd..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/running-difference.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -{ - "title": "RUNNING_DIFFERENCE", - "language": "en" -} ---- - - - -## running_difference - -### description -#### Syntax - -`T running_difference(T x)` - -Calculates the difference between successive row values ​​in the data block. -The result of the function depends on the affected data blocks and the order of data in the block. - -The rows order used during the calculation of running_difference can differ from the order of rows returned to the user. The function will be deprecated in the future. 
Please use window function instead, below is the example: -```sql --- running difference(x) -SELECT running_difference(x) FROM t ORDER BY k; - --- window function -SELECT x - lag(x, 1, 0) OVER (ORDER BY k) FROM t; -``` - -#### Arguments -`x` - A list of data.TINYINT,SMALLINT,INT,BIGINT,LARGEINT,FLOAT,DOUBLE,DATE,DATETIME,DECIMAL - -##### Returned value -Returns 0 for the first row and the difference from the previous row for each subsequent row - -### example - -```sql -DROP TABLE IF EXISTS running_difference_test; - -CREATE TABLE running_difference_test ( - `id` int NOT NULL COMMENT 'id', - `day` date COMMENT 'day', - `time_val` datetime COMMENT 'time_val', - `doublenum` double NULL COMMENT 'doublenum' -) -DUPLICATE KEY(id) -DISTRIBUTED BY HASH(id) BUCKETS 3 -PROPERTIES ( - "replication_num" = "1" -); - -INSERT into running_difference_test (id, day, time_val,doublenum) values ('1', '2022-10-28', '2022-03-12 10:41:00', null), - ('2','2022-10-27', '2022-03-12 10:41:02', 2.6), - ('3','2022-10-28', '2022-03-12 10:41:03', 2.5), - ('4','2022-9-29', '2022-03-12 10:41:03', null), - ('5','2022-10-31', '2022-03-12 10:42:01', 3.3), - ('6', '2022-11-08', '2022-03-12 11:05:04', 4.7); - -SELECT * from running_difference_test ORDER BY id ASC; - -+------+------------+---------------------+-----------+ -| id | day | time_val | doublenum | -+------+------------+---------------------+-----------+ -| 1 | 2022-10-28 | 2022-03-12 10:41:00 | NULL | -| 2 | 2022-10-27 | 2022-03-12 10:41:02 | 2.6 | -| 3 | 2022-10-28 | 2022-03-12 10:41:03 | 2.5 | -| 4 | 2022-09-29 | 2022-03-12 10:41:03 | NULL | -| 5 | 2022-10-31 | 2022-03-12 10:42:01 | 3.3 | -| 6 | 2022-11-08 | 2022-03-12 11:05:04 | 4.7 | -+------+------------+---------------------+-----------+ - -SELECT - id, - running_difference(id) AS delta -FROM -( - SELECT - id, - day, - time_val, - doublenum - FROM running_difference_test -)as runningDifference ORDER BY id ASC; - -+------+-------+ -| id | delta | -+------+-------+ -| 1 | 0 | -| 2 | 1 
| -| 3 | 1 | -| 4 | 1 | -| 5 | 1 | -| 6 | 1 | -+------+-------+ - -SELECT - day, - running_difference(day) AS delta -FROM -( - SELECT - id, - day, - time_val, - doublenum - FROM running_difference_test -)as runningDifference ORDER BY id ASC; - -+------------+-------+ -| day | delta | -+------------+-------+ -| 2022-10-28 | 0 | -| 2022-10-27 | -1 | -| 2022-10-28 | 1 | -| 2022-09-29 | -29 | -| 2022-10-31 | 32 | -| 2022-11-08 | 8 | -+------------+-------+ - -SELECT - time_val, - running_difference(time_val) AS delta -FROM -( - SELECT - id, - day, - time_val, - doublenum - FROM running_difference_test -)as runningDifference ORDER BY id ASC; - -+---------------------+-------+ -| time_val | delta | -+---------------------+-------+ -| 2022-03-12 10:41:00 | 0 | -| 2022-03-12 10:41:02 | 2 | -| 2022-03-12 10:41:03 | 1 | -| 2022-03-12 10:41:03 | 0 | -| 2022-03-12 10:42:01 | 58 | -| 2022-03-12 11:05:04 | 1383 | -+---------------------+-------+ - -SELECT - doublenum, - running_difference(doublenum) AS delta -FROM -( - SELECT - id, - day, - time_val, - doublenum - FROM running_difference_test -)as runningDifference ORDER BY id ASC; - -+-----------+----------------------+ -| doublenum | delta | -+-----------+----------------------+ -| NULL | NULL | -| 2.6 | NULL | -| 2.5 | -0.10000000000000009 | -| NULL | NULL | -| 3.3 | NULL | -| 4.7 | 1.4000000000000004 | -+-----------+----------------------+ - -``` - -### keywords - -running_difference \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/sign.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/sign.md deleted file mode 100644 index 4f98136466eed0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/sign.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "SIGN", - "language": "en" -} ---- - - - -## sign - -### description -#### Syntax - -`TINYINT sign(DOUBLE x)` -Returns the sign of `x`. 
Negative, zero or positive numbers correspond to -1, 0 or 1 respectively. - -### example - -``` -mysql> select sign(3); -+-----------+ -| sign(3.0) | -+-----------+ -| 1 | -+-----------+ -mysql> select sign(0); -+-----------+ -| sign(0.0) | -+-----------+ -| 0 | -mysql> select sign(-10.0); -+-------------+ -| sign(-10.0) | -+-------------+ -| -1 | -+-------------+ -1 row in set (0.01 sec) -``` - -### keywords - SIGN diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/sin.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/sin.md deleted file mode 100644 index d49a17b070b656..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/sin.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "SIN", - "language": "en" -} ---- - - - -## sin - -### description -#### Syntax - -`DOUBLE sin(DOUBLE x)` -Returns the sine of `x`, where `x` is in radians - -### example - -``` -mysql> select sin(0); -+----------+ -| sin(0.0) | -+----------+ -| 0 | -+----------+ -mysql> select sin(1); -+--------------------+ -| sin(1.0) | -+--------------------+ -| 0.8414709848078965 | -+--------------------+ -mysql> select sin(0.5 * Pi()); -+-----------------+ -| sin(0.5 * pi()) | -+-----------------+ -| 1 | -+-----------------+ -``` - -### keywords - SIN diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/sqrt.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/sqrt.md deleted file mode 100644 index 9713b42af2ea88..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/sqrt.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -{ - "title": "SQRT", - "language": "en" -} ---- - - - -## sqrt - -### description -#### Syntax - -`DOUBLE sqrt(DOUBLE x)` -Returns the square root of `x`.`x` is required to be greater than or equal to `0`. 
- -### example - -``` -mysql> select sqrt(9); -+-----------+ -| sqrt(9.0) | -+-----------+ -| 3 | -+-----------+ -mysql> select sqrt(2); -+--------------------+ -| sqrt(2.0) | -+--------------------+ -| 1.4142135623730951 | -+--------------------+ -mysql> select sqrt(100.0); -+-------------+ -| sqrt(100.0) | -+-------------+ -| 10 | -+-------------+ -``` - -### keywords - SQRT diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/tan.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/tan.md deleted file mode 100644 index bd95b3b3e7f111..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/tan.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "TAN", - "language": "en" -} ---- - - - -## tan - -### description -#### Syntax - -`DOUBLE tan(DOUBLE x)` -Returns the tangent of `x`, where `x` is in radians. - -### example - -``` -mysql> select tan(0); -+----------+ -| tan(0.0) | -+----------+ -| 0 | -+----------+ -mysql> select tan(1); -+--------------------+ -| tan(1.0) | -+--------------------+ -| 1.5574077246549023 | -+--------------------+ -``` - -### keywords - TAN diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/tanh.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/tanh.md deleted file mode 100644 index 8133635489573a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/tanh.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -{ - "title": "TANH", - "language": "en" -} ---- - - - -## tanh - -### description -#### Syntax - -`DOUBLE tanh(DOUBLE x)` -Returns the hyperbolic tangent of `x`, tanh(x) = sinh(x) / cosh(x). 
- -### example - -``` -mysql> select tanh(0); -+---------+ -| tanh(0) | -+---------+ -| 0 | -+---------+ - -mysql> select tanh(1); -+---------------------+ -| tanh(1) | -+---------------------+ -| 0.76159415595576485 | -+---------------------+ -``` - -### keywords - TANH diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/truncate.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/truncate.md deleted file mode 100644 index ca0e3ccc4a4682..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/truncate.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -{ - "title": "TRUNCATE", - "language": "en" -} ---- - - - -## truncate - -### description -#### Syntax - -`DOUBLE truncate(DOUBLE x, INT d)` -Numerically truncate `x` according to the number of decimal places `d`. - -The rules are as follows: -When `d > 0`: keep `d` decimal places of `x` -When `d = 0`: remove the fractional part of `x` and keep only the integer part -When `d < 0`: Remove the fractional part of `x`, and replace the integer part with the number `0` according to the number of digits specified by `d` - -### example - -``` -mysql> select truncate(124.3867, 2); -+-----------------------+ -| truncate(124.3867, 2) | -+-----------------------+ -| 124.38 | -+-----------------------+ -mysql> select truncate(124.3867, 0); -+-----------------------+ -| truncate(124.3867, 0) | -+-----------------------+ -| 124 | -+-----------------------+ -mysql> select truncate(-124.3867, -2); -+-------------------------+ -| truncate(-124.3867, -2) | -+-------------------------+ -| -100 | -+-------------------------+ -``` - -### keywords - TRUNCATE diff --git a/docs/en/docs/sql-manual/sql-functions/numeric-functions/uuid_numeric.md b/docs/en/docs/sql-manual/sql-functions/numeric-functions/uuid_numeric.md deleted file mode 100644 index 07d7ec3f9c58a4..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/numeric-functions/uuid_numeric.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": 
"uuid_numeric", - "language": "en" -} ---- - - - -## uuid_numeric -### description -#### Syntax - -`LARGEINT uuid_numeric()` - -Return a uuid in type `LARGEINT`. - -Note that `LARGEINT` has type Int128, so we could get a negative number from `uuid_numeric()`. - -### example - -``` - -mysql> select uuid_numeric(); -+----------------------------------------+ -| uuid_numeric() | -+----------------------------------------+ -| 82218484683747862468445277894131281464 | -+----------------------------------------+ -``` - -### keywords - - UUID UUID-NUMERIC diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-angle-sphere.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-angle-sphere.md deleted file mode 100644 index ca99aeb9125dd8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-angle-sphere.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -{ - "title": "ST_ANGLE_SPHERE", - "language": "en" -} ---- - - - -## ST_Angle_Sphere -### description -#### Syntax - -`DOUBLE ST_Angle_Sphere(DOUBLE x_lng, DOUBLE x_lat, DOUBLE y_lng, DOUBLE y_lat)` - - -Calculates the central angle between two points on the Earth's surface. The incoming parameters are the longitude of point X, the latitude of point X, the longitude of point Y and the latitude of point Y. - -x_lng and y_lng are Longitude values, must be in the range [-180, 180]. -x_lat and y_lat are Latitude values, must be in the range [-90, 90]. 
- -### example - -``` -mysql> select ST_Angle_Sphere(116.35620117, 39.939093, 116.4274406433, 39.9020987219); -+---------------------------------------------------------------------------+ -| st_angle_sphere(116.35620117, 39.939093, 116.4274406433, 39.9020987219) | -+---------------------------------------------------------------------------+ -| 0.0659823452409903 | -+---------------------------------------------------------------------------+ -1 row in set (0.06 sec) - -mysql> select ST_Angle_Sphere(0, 0, 45, 0); -+----------------------------------------+ -| st_angle_sphere(0.0, 0.0, 45.0, 0.0) | -+----------------------------------------+ -| 45 | -+----------------------------------------+ -1 row in set (0.06 sec) -``` -### keywords -ST_ANGLE_SPHERE,ST,ANGLE,SPHERE diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-angle.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-angle.md deleted file mode 100644 index bccc92b06034ca..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-angle.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -{ - "title": "ST_ANGLE", - "language": "en" -} ---- - - - -## ST_Angle - -### Syntax - -`DOUBLE ST_Angle(GEOPOINT point1, GEOPOINT point2, GEOPOINT point3)` - -### description - -Enter three point, which represent two intersecting lines. Returns the angle between these lines. Point 2 and point 1 represent the first line and point 2 and point 3 represent the second line. The angle between these lines is in radians, in the range [0, 2pi). The angle is measured clockwise from the first line to the second line. - -ST_ANGLE has the following edge cases: - -* If points 2 and 3 are the same, returns NULL. -* If points 2 and 1 are the same, returns NULL. -* If points 2 and 3 are exactly antipodal, returns NULL. -* If points 2 and 1 are exactly antipodal, returns NULL. -* If any of the input geographies are not single points or are the empty geography, then throws an error. 
- -### example - -``` -mysql> SELECT ST_Angle(ST_Point(1, 0),ST_Point(0, 0),ST_Point(0, 1)); -+----------------------------------------------------------------------+ -| st_angle(st_point(1.0, 0.0), st_point(0.0, 0.0), st_point(0.0, 1.0)) | -+----------------------------------------------------------------------+ -| 4.71238898038469 | -+----------------------------------------------------------------------+ -1 row in set (0.04 sec) - -mysql> SELECT ST_Angle(ST_Point(0, 0),ST_Point(1, 0),ST_Point(0, 1)); -+----------------------------------------------------------------------+ -| st_angle(st_point(0.0, 0.0), st_point(1.0, 0.0), st_point(0.0, 1.0)) | -+----------------------------------------------------------------------+ -| 0.78547432161873854 | -+----------------------------------------------------------------------+ -1 row in set (0.02 sec) - -mysql> SELECT ST_Angle(ST_Point(1, 0),ST_Point(0, 0),ST_Point(1, 0)); -+----------------------------------------------------------------------+ -| st_angle(st_point(1.0, 0.0), st_point(0.0, 0.0), st_point(1.0, 0.0)) | -+----------------------------------------------------------------------+ -| 0 | -+----------------------------------------------------------------------+ -1 row in set (0.02 sec) - -mysql> SELECT ST_Angle(ST_Point(1, 0),ST_Point(0, 0),ST_Point(0, 0)); -+----------------------------------------------------------------------+ -| st_angle(st_point(1.0, 0.0), st_point(0.0, 0.0), st_point(0.0, 0.0)) | -+----------------------------------------------------------------------+ -| NULL | -+----------------------------------------------------------------------+ -1 row in set (0.03 sec) - -mysql> SELECT ST_Angle(ST_Point(0, 0),ST_Point(-30, 0),ST_Point(150, 0)); -+--------------------------------------------------------------------------+ -| st_angle(st_point(0.0, 0.0), st_point(-30.0, 0.0), st_point(150.0, 0.0)) | -+--------------------------------------------------------------------------+ -| NULL | 
-+--------------------------------------------------------------------------+ -1 row in set (0.02 sec) -``` -### keywords -ST_ANGLE,ST,ANGLE diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-area.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-area.md deleted file mode 100644 index b9f38dc13582bc..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-area.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -{ - "title": "ST_AREA", - "language": "en" -} ---- - - - -## ST_Area_Square_Meters,ST_Area_Square_Km - -### Syntax - -```sql -DOUBLE ST_Area_Square_Meters(GEOMETRY geo) -DOUBLE ST_Area_Square_Km(GEOMETRY geo) -``` - -### description - -Calculate the area of the area on the earth's sphere. Currently, the parameter geo supports St_Point, St_LineString, St_Circle and St_Polygon. - -Returns zero if the input is St_Point, St_LineString. - -Among them, the unit returned by ST_Area_Square_Meters (GEOMETRY geo) is square meters, and the unit returned by ST_Area_Square_Km (GEOMETRY geo) is square kilometers. 
- -### example - -``` -mysql> SELECT ST_Area_Square_Meters(ST_Circle(0, 0, 1)); -+-------------------------------------------------+ -| st_area_square_meters(st_circle(0.0, 0.0, 1.0)) | -+-------------------------------------------------+ -| 3.1415926535897869 | -+-------------------------------------------------+ -1 row in set (0.04 sec) - -mysql> SELECT ST_Area_Square_Km(ST_Polygon("POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))")); -+----------------------------------------------------------------------+ -| st_area_square_km(st_polygon('POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))')) | -+----------------------------------------------------------------------+ -| 12364.036567076409 | -+----------------------------------------------------------------------+ -1 row in set (0.01 sec) - -mysql> SELECT ST_Area_Square_Meters(ST_Point(0, 1)); -+-------------------------------------------+ -| st_area_square_meters(st_point(0.0, 1.0)) | -+-------------------------------------------+ -| 0 | -+-------------------------------------------+ -1 row in set (0.05 sec) - -mysql> SELECT ST_Area_Square_Meters(ST_LineFromText("LINESTRING (1 1, 2 2)")); -+-----------------------------------------------------------------+ -| st_area_square_meters(st_linefromtext('LINESTRING (1 1, 2 2)')) | -+-----------------------------------------------------------------+ -| 0 | -+-----------------------------------------------------------------+ -1 row in set (0.03 sec) -``` -### keywords -ST_Area_Square_Meters,ST_Area_Square_Km,ST_Area,ST,Area diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-asbinary.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-asbinary.md deleted file mode 100644 index d349861bbf19db..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-asbinary.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -{ - "title": "ST_ASBINARY", - "language": "en" -} ---- - - - -## ST_AsBinary - -### Syntax - -`VARCHAR ST_AsBinary(GEOMETRY geo)` - -### Description - 
-Converting a geometric figure into a standard WKB (Well-known binary) representation - -Currently supported geometric figures are: Point, LineString, Polygon. - -### example - -``` -mysql> select ST_AsBinary(st_point(24.7, 56.7)); -+----------------------------------------------+ -| st_asbinary(st_point(24.7, 56.7)) | -+----------------------------------------------+ -| \x01010000003333333333b338409a99999999594c40 | -+----------------------------------------------+ -1 row in set (0.01 sec) - -mysql> select ST_AsBinary(ST_GeometryFromText("LINESTRING (1 1, 2 2)")); -+--------------------------------------------------------------------------------------+ -| st_asbinary(st_geometryfromtext('LINESTRING (1 1, 2 2)')) | -+--------------------------------------------------------------------------------------+ -| \x010200000002000000000000000000f03f000000000000f03f00000000000000400000000000000040 | -+--------------------------------------------------------------------------------------+ -1 row in set (0.04 sec) - -mysql> select ST_AsBinary(ST_Polygon("POLYGON ((114.104486 22.547119,114.093758 22.547753,114.096504 22.532057,114.104229 22.539826,114.106203 22.542680,114.104486 22.547119))")); -+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| st_asbinary(st_polygon('POLYGON ((114.104486 22.547119,114.093758 22.547753,114.096504 22.532057,114.104229 22.539826,114.106203 22.542680,114.104486 22.547119))')) | -+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| 
\x01030000000100000006000000f3380ce6af865c402d05a4fd0f8c364041ef8d2100865c403049658a398c3640b9fb1c1f2d865c409d9b36e334883640de921cb0ab865c40cf876709328a36402cefaa07cc865c407b319413ed8a3640f3380ce6af865c402d05a4fd0f8c3640 | -+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -1 row in set (0.02 sec) - -``` -### keywords -ST_ASBINARY,ST,ASBINARY diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-astext.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-astext.md deleted file mode 100644 index b1990bb38353a4..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-astext.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ - "title": "ST_ASTEXT,ST_ASWKT", - "language": "en" -} ---- - - - -## ST_AsText,ST_AsWKT -### Description -#### Syntax - -`VARCHAR ST_AsText (GEOMETRY geo)` - - -Converting a geometric figure into a WKT (Well Known Text) representation - -### example - -``` -mysql> SELECT ST_AsText(ST_Point(24.7, 56.7)); -+---------------------------------+ -| st_astext(st_point(24.7, 56.7)) | -+---------------------------------+ -| POINT (24.7 56.7) | -+---------------------------------+ -``` -### keywords -ST_ASTEXT,ST_ASWKT,ST,ASTEXT,ASWKT diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-azimuth.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-azimuth.md deleted file mode 100644 index bb7b9ed19e41a8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-azimuth.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -{ - "title": "ST_AZIMUTH", - "language": "en" -} ---- - - - -## ST_Azimuth - -### Syntax - -`DOUBLE ST_Azimuth(GEOPOINT point1, GEOPOINT point2)` - -### description - -Enter two point, and returns the azimuth of the line segment formed by points 1 and 2. 
The azimuth is the angle in radians measured between the line from point 1 facing true North to the line segment from point 1 to point 2. - -The positive angle is measured clockwise on the surface of a sphere. For example, the azimuth for a line segment: - -* Pointing North is 0 -* Pointing East is PI/2 -* Pointing South is PI -* Pointing West is 3PI/2 - -ST_Azimuth has the following edge cases: - -* If the two input points are the same, returns NULL. -* If the two input points are exactly antipodal, returns NULL. -* If either of the input geographies are not single points or are the empty geography, throws an error. - -### example - -``` -mysql> SELECT st_azimuth(ST_Point(1, 0),ST_Point(0, 0)); -+----------------------------------------------------+ -| st_azimuth(st_point(1.0, 0.0), st_point(0.0, 0.0)) | -+----------------------------------------------------+ -| 4.71238898038469 | -+----------------------------------------------------+ -1 row in set (0.03 sec) - -mysql> SELECT st_azimuth(ST_Point(0, 0),ST_Point(1, 0)); -+----------------------------------------------------+ -| st_azimuth(st_point(0.0, 0.0), st_point(1.0, 0.0)) | -+----------------------------------------------------+ -| 1.5707963267948966 | -+----------------------------------------------------+ -1 row in set (0.01 sec) - -mysql> SELECT st_azimuth(ST_Point(0, 0),ST_Point(0, 1)); -+----------------------------------------------------+ -| st_azimuth(st_point(0.0, 0.0), st_point(0.0, 1.0)) | -+----------------------------------------------------+ -| 0 | -+----------------------------------------------------+ -1 row in set (0.01 sec) - -mysql> SELECT st_azimuth(ST_Point(0, 1),ST_Point(0, 1)); -+----------------------------------------------------+ -| st_azimuth(st_point(0.0, 1.0), st_point(0.0, 1.0)) | -+----------------------------------------------------+ -| NULL | -+----------------------------------------------------+ -1 row in set (0.02 sec) - -mysql> SELECT st_azimuth(ST_Point(-30, 
0),ST_Point(150, 0)); -+--------------------------------------------------------+ -| st_azimuth(st_point(-30.0, 0.0), st_point(150.0, 0.0)) | -+--------------------------------------------------------+ -| NULL | -+--------------------------------------------------------+ -1 row in set (0.02 sec) - -``` -### keywords -ST_AZIMUTH,ST,AZIMUTH diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-circle.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-circle.md deleted file mode 100644 index 5b9750a1cf021c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-circle.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "ST_CIRCLE", - "language": "en" -} ---- - - - -## ST_Circle -### Description -#### Syntax - -`GEOMETRY ST_Circle(DOUBLE center_lng, DOUBLE center_lat, DOUBLE radius)` - - -Convert a WKT (Well Known Text) into a circle on the earth's sphere. Where `center_lng'denotes the longitude of the center of a circle, -` Center_lat` denotes the latitude of the center of a circle, radius` denotes the radius of a circle in meters. 
- -### example - -``` -mysql> SELECT ST_AsText(ST_Circle(111, 64, 10000)); -+--------------------------------------------+ -| st_astext(st_circle(111.0, 64.0, 10000.0)) | -+--------------------------------------------+ -| CIRCLE ((111 64), 10000) | -+--------------------------------------------+ -``` -### keywords -ST_CIRCLE,ST,CIRCLE diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-contains.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-contains.md deleted file mode 100644 index a3ab1214b0ccfe..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-contains.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "ST_CONTAINS", - "language": "en" -} ---- - - - -## ST_Contains -### Description -#### Syntax - -`BOOL ST_Contains(GEOMETRY shape1, GEOMETRY shape2)` - - -Judging whether geometric shape 1 can contain geometric shape 2 completely - -### example - - -``` -mysql> SELECT ST_Contains(ST_Polygon("POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))"), ST_Point(5, 5)); -+----------------------------------------------------------------------------------------+ -| st_contains(st_polygon('POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))'), st_point(5.0, 5.0)) | -+----------------------------------------------------------------------------------------+ -| 1 | -+----------------------------------------------------------------------------------------+ - -mysql> SELECT ST_Contains(ST_Polygon("POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))"), ST_Point(50, 50)); -+------------------------------------------------------------------------------------------+ -| st_contains(st_polygon('POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))'), st_point(50.0, 50.0)) | -+------------------------------------------------------------------------------------------+ -| 0 | -+------------------------------------------------------------------------------------------+ -``` -### keywords -ST_CONTAINS,ST,CONTAINS diff --git 
a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-distance-sphere.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-distance-sphere.md deleted file mode 100644 index a50fc65625b1fa..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-distance-sphere.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "ST_DISTANCE_SPHERE", - "language": "en" -} ---- - - - -## ST_Distance_Sphere -### description -#### Syntax - -`DOUBLE ST_Distance_Sphere(DOUBLE x_lng, DOUBLE x_lat, DOUBLE y_lng, DOUBLE y_lat)` - - -Calculate the spherical distance between two points of the earth in meters. The incoming parameters are the longitude of point X, the latitude of point X, the longitude of point Y and the latitude of point Y. - -x_lng and y_lng are Longitude values, must be in the range [-180, 180]. -x_lat and y_lat are Latitude values, must be in the range [-90, 90]. - -### example - -``` -mysql> select st_distance_sphere(116.35620117, 39.939093, 116.4274406433, 39.9020987219); -+----------------------------------------------------------------------------+ -| st_distance_sphere(116.35620117, 39.939093, 116.4274406433, 39.9020987219) | -+----------------------------------------------------------------------------+ -| 7336.9135549995917 | -+----------------------------------------------------------------------------+ -``` -### keywords -ST_DISTANCE_SPHERE,ST,DISTANCE,SPHERE diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-geometryfromtext.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-geometryfromtext.md deleted file mode 100644 index 28072a02016d03..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-geometryfromtext.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ - "title": "ST_GEOMETRYFROMTEXT,ST_GEOMFROMTEXT", - "language": "en" -} ---- - - - -## ST_GeometryFromText,ST_GeomFromText -### Description -#### Syntax - -`GEOMETRY ST_GeometryFromText (VARCHAR wkt)` - - 
-Converting a WKT (Well Known Text) into a corresponding memory geometry - -### example - -``` -mysql> SELECT ST_AsText(ST_GeometryFromText("LINESTRING (1 1, 2 2)")); -+---------------------------------------------------------+ -| st_astext(st_geometryfromtext('LINESTRING (1 1, 2 2)')) | -+---------------------------------------------------------+ -| LINESTRING (1 1, 2 2) | -+---------------------------------------------------------+ -``` -### keywords -ST_GEOMETRYFROMTEXT,ST_GEOMFROMTEXT,ST,GEOMETRYFROMTEXT,GEOMFROMTEXT diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-geometryfromwkb.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-geometryfromwkb.md deleted file mode 100644 index bf0a47f4b5903b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-geometryfromwkb.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -{ - "title": "ST_GEOMETRYFROMWKB,ST_GEOMFROMWKB", - "language": "en" -} ---- - - - -## ST_GeometryFromWKB,ST_GeomFromWKB - -### Syntax - -`GEOMETRY ST_GeometryFromWKB(VARCHAR WKB)` - -### Description - -Converting a standard WKB (Well-known binary) into a corresponding memory geometry - -### example - -``` -mysql> select ST_AsText(ST_GeometryFromWKB(ST_AsBinary(ST_Point(24.7, 56.7)))); -+------------------------------------------------------------------+ -| st_astext(st_geometryfromwkb(st_asbinary(st_point(24.7, 56.7)))) | -+------------------------------------------------------------------+ -| POINT (24.7 56.7) | -+------------------------------------------------------------------+ -1 row in set (0.05 sec) - -mysql> select ST_AsText(ST_GeomFromWKB(ST_AsBinary(ST_Point(24.7, 56.7)))); -+--------------------------------------------------------------+ -| st_astext(st_geomfromwkb(st_asbinary(st_point(24.7, 56.7)))) | -+--------------------------------------------------------------+ -| POINT (24.7 56.7) | -+--------------------------------------------------------------+ -1 row in set (0.03 sec) - -mysql> 
select ST_AsText(ST_GeometryFromWKB(ST_AsBinary(ST_GeometryFromText("LINESTRING (1 1, 2 2)")))); -+------------------------------------------------------------------------------------------+ -| st_astext(st_geometryfromwkb(st_asbinary(st_geometryfromtext('LINESTRING (1 1, 2 2)')))) | -+------------------------------------------------------------------------------------------+ -| LINESTRING (1 1, 2 2) | -+------------------------------------------------------------------------------------------+ -1 row in set (0.06 sec) - -mysql> select ST_AsText(ST_GeometryFromWKB(ST_AsBinary(ST_Polygon("POLYGON ((114.104486 22.547119,114.093758 22.547753,114.096504 22.532057,114.104229 22.539826,114.106203 22.542680,114.104486 22.547119))")))); -+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| st_astext(st_geometryfromwkb(st_asbinary(st_polygon('POLYGON ((114.104486 22.547119,114.093758 22.547753,114.096504 22.532057,114.104229 22.539826,114.106203 22.542680,114.104486 22.547119))')))) | -+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| POLYGON ((114.104486 22.547119, 114.093758 22.547753, 114.096504 22.532057, 114.104229 22.539826, 114.106203 22.54268, 114.104486 22.547119)) | -+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -1 row in set (0.03 sec) - -mysql> select ST_AsText(ST_GeomFromWKB(ST_AsBinary(ST_Polygon("POLYGON ((114.104486 22.547119,114.093758 22.547753,114.096504 22.532057,114.104229 22.539826,114.106203 22.542680,114.104486 22.547119))")))); 
-+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| st_astext(st_geomfromwkb(st_asbinary(st_polygon('POLYGON ((114.104486 22.547119,114.093758 22.547753,114.096504 22.532057,114.104229 22.539826,114.106203 22.542680,114.104486 22.547119))')))) | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| POLYGON ((114.104486 22.547119, 114.093758 22.547753, 114.096504 22.532057, 114.104229 22.539826, 114.106203 22.54268, 114.104486 22.547119)) | -+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -1 row in set (0.03 sec) - -``` -### keywords -ST_GEOMETRYFROMWKB,ST_GEOMFROMWKB,ST,GEOMETRYFROMWKB,GEOMFROMWKB,WKB diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-linefromtext.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-linefromtext.md deleted file mode 100644 index 3a033103b15802..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-linefromtext.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ - "title": "ST_LINEFROMTEXT,ST_LINESTRINGFROMTEXT", - "language": "en" -} ---- - - - -## ST_LineFromText,ST_LineStringFromText -### Description -#### Syntax - -`GEOMETRY ST LineFromText (VARCHAR wkt)` - - -Converting a WKT (Well Known Text) into a Line-style memory representation - -### example - -``` -mysql> SELECT ST_AsText(ST_LineFromText("LINESTRING (1 1, 2 2)")); -+---------------------------------------------------------+ -| st_astext(st_geometryfromtext('LINESTRING (1 1, 2 2)')) | -+---------------------------------------------------------+ -| LINESTRING (1 1, 2 2) | 
-+---------------------------------------------------------+ -``` -### keywords -ST_LINEFROMTEXT, ST_LINESTRINGFROMTEXT,ST,LINEFROMTEXT,LINESTRINGFROMTEXT diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-point.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-point.md deleted file mode 100644 index 050f41a31ad88a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-point.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "ST_POINT", - "language": "en" -} ---- - - - -## St_Point -### Description -#### Syntax - -`POINT ST_Point(DOUBLE x, DOUBLE y)` - - -Given the X coordinate value, the Y coordinate value returns the corresponding Point. -The current value is meaningful only for spherical sets, and X/Y corresponds to longitude/latitude. - -### example - -``` -mysql> SELECT ST_AsText(ST_Point(24.7, 56.7)); -+---------------------------------+ -| st_astext(st_point(24.7, 56.7)) | -+---------------------------------+ -| POINT (24.7 56.7) | -+---------------------------------+ -``` -### keywords -ST_POINT,ST,POINT diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-polygon.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-polygon.md deleted file mode 100644 index 226736bb2d64c0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-polygon.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "ST_POLYGON,ST_POLYFROMTEXT,ST_POLYGONFROMTEXT", - "language": "en" -} ---- - - - -## ST_Polygon,ST_PolyFromText,ST_PolygonFromText -### Description -#### Syntax - -`GEOMETRY ST_Polygon (VARCHAR wkt)` - - -Converting a WKT (Well Known Text) into a corresponding polygon memory form - - -#### example - -``` -mysql> SELECT ST_AsText(ST_Polygon("POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))")); -+------------------------------------------------------------------+ -| st_astext(st_polygon('POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))')) | 
-+------------------------------------------------------------------+ -| POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0)) | -+------------------------------------------------------------------+ -``` -### keywords -ST_POLYGON,ST_POLYFROMTEXT,ST_POLYGONFROMTEXT,ST,POLYGON,POLYFROMTEXT,POLYGONFROMTEXT diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-x.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-x.md deleted file mode 100644 index a00eb050cfc13c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-x.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ - "title": "ST_X", - "language": "en" -} ---- - - - -## ST_X -### Description -#### Syntax - -`DOUBLE ST_X(POINT point)` - - -When point is a valid POINT type, the corresponding X coordinate value is returned. - -### example - -``` -mysql> SELECT ST_X(ST_Point(24.7, 56.7)); -+----------------------------+ -| st_x(st_point(24.7, 56.7)) | -+----------------------------+ -| 24.7 | -+----------------------------+ -``` -### keywords -ST_X,ST,X diff --git a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-y.md b/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-y.md deleted file mode 100644 index eec67131b9ad37..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/spatial-functions/st-y.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ - "title": "ST_Y", - "language": "en" -} ---- - - - -## ST_Y -### Description -#### Syntax - -`DOUBLE ST_Y(POINT point)` - - -When point is a valid POINT type, the corresponding Y coordinate value is returned. 
- -### example - -``` -mysql> SELECT ST_Y(ST_Point(24.7, 56.7)); -+----------------------------+ -| st_y(st_point(24.7, 56.7)) | -+----------------------------+ -| 56.7 | -+----------------------------+ -``` -### keywords -ST_Y,ST,Y diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/append-trailing-char-if-absent.md b/docs/en/docs/sql-manual/sql-functions/string-functions/append-trailing-char-if-absent.md deleted file mode 100644 index 4a4c13ece4f6e4..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/append-trailing-char-if-absent.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "APPEND_TRAILING_CHAR_IF_ABSENT", - "language": "en" -} ---- - - - -## append_trailing_char_if_absent - -### description - -#### Syntax - -`VARCHAR append_trailing_char_if_absent(VARCHAR str, VARCHAR trailing_char)` - -If the @str string is non-empty and does not contain the @trailing_char character at the end, it appends the @trailing_char character to the end. -@trailing_char contains only one character, and it will return NULL if contains more than one character - -### example - -``` -MySQL [test]> select append_trailing_char_if_absent('a','c'); -+------------------------------------------+ -| append_trailing_char_if_absent('a', 'c') | -+------------------------------------------+ -| ac | -+------------------------------------------+ -1 row in set (0.02 sec) - -MySQL [test]> select append_trailing_char_if_absent('ac','c'); -+-------------------------------------------+ -| append_trailing_char_if_absent('ac', 'c') | -+-------------------------------------------+ -| ac | -+-------------------------------------------+ -1 row in set (0.00 sec) -``` - -### keywords - - APPEND_TRAILING_CHAR_IF_ABSENT diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/ascii.md b/docs/en/docs/sql-manual/sql-functions/string-functions/ascii.md deleted file mode 100644 index fd9b0016f129ee..00000000000000 --- 
a/docs/en/docs/sql-manual/sql-functions/string-functions/ascii.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "ASCII", - "language": "en" -} ---- - - - -## ascii -### Description -#### Syntax - -`INT AXES (WARCHAR STR)` - - -Returns the ASCII code corresponding to the first character of the string - -### example - -``` -mysql> select ascii('1'); -+------------+ -| ascii('1') | -+------------+ -| 49 | -+------------+ - -mysql> select ascii('234'); -+--------------+ -| ascii('234') | -+--------------+ -| 50 | -+--------------+ -``` -### keywords - ASCII diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/bit-length.md b/docs/en/docs/sql-manual/sql-functions/string-functions/bit-length.md deleted file mode 100644 index d9ccfd4a85cc5e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/bit-length.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "BIT_LENGTH", - "language": "en" -} ---- - - - -## bit_length -### Description -#### Syntax - -`INT bit_length (VARCHAR str)` - - -Return length of argument in bits. - -### example - -``` -mysql> select bit_length("abc"); -+-------------------+ -| bit_length('abc') | -+-------------------+ -| 24 | -+-------------------+ - -mysql> select bit_length("中国"); -+----------------------+ -| bit_length('中国') | -+----------------------+ -| 48 | -+----------------------+ -``` -### keywords - BIT_LENGTH diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/char-length.md b/docs/en/docs/sql-manual/sql-functions/string-functions/char-length.md deleted file mode 100644 index 8b90b271f11436..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/char-length.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "CHAR_LENGTH", - "language": "en" -} ---- - - - -## char_length -### Description -#### Syntax - -INT char_length(VARCHAR str) - - -Returns the length of the string, and the number of characters returned for multi-byte characters. 
For example, five two-byte width words return a length of 5, only utf8 encoding is support at the current version. `character_length` is the alias for this function. - -### example - - -``` -mysql> select char_length("abc"); -+--------------------+ -| char_length('abc') | -+--------------------+ -| 3 | -+--------------------+ - -mysql> select char_length("中国"); -+------------------- ---+ -| char_length('中国') | -+-----------------------+ -| 2 | -+-----------------------+ -``` -### keywords - CHAR_LENGTH, CHARACTER_LENGTH diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/char.md b/docs/en/docs/sql-manual/sql-functions/string-functions/char.md deleted file mode 100644 index 5ec2fb86a32718..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/char.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -{ - "title": "CHAR", - "language": "en" -} ---- - - - - - -## function char -### description -#### Syntax - -`VARCHAR char(INT,..., [USING charset_name])` - -Interprets each argument as an integer and returns a string consisting of the characters given by the code values of those integers. `NULL` values are skipped. - -If the result string is illegal for the given character set, the result from `CHAR()` becomes `NULL`. - -Arguments larger than `255` are converted into multiple result bytes. For example, `char(15049882)` is equivalent to `char(229, 164, 154)`. - -Currently only `utf8` is supported for `charset_name`. 
- - -### example - -``` -mysql> select char(68, 111, 114, 105, 115); -+--------------------------------------+ -| char('utf8', 68, 111, 114, 105, 115) | -+--------------------------------------+ -| Doris | -+--------------------------------------+ - -mysql> select char(15049882, 15179199, 14989469); -+--------------------------------------------+ -| char('utf8', 15049882, 15179199, 14989469) | -+--------------------------------------------+ -| 多睿丝 | -+--------------------------------------------+ - -mysql> select char(255); -+-------------------+ -| char('utf8', 255) | -+-------------------+ -| NULL | -+-------------------+ -``` -### keywords - CHAR diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/concat-ws.md b/docs/en/docs/sql-manual/sql-functions/string-functions/concat-ws.md deleted file mode 100644 index 657b892e3c59d4..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/concat-ws.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -{ - "title": "CONCAT_WS", - "language": "en" -} ---- - - - -## concat_ws -### Description -#### Syntax - -```sql -VARCHAR concat_ws(VARCHAR sep, VARCHAR str,...) -VARCHAR concat_ws(VARCHAR sep, ARRAY array) -``` - -Using the first parameter SEP as a connector, the second parameter and all subsequent parameters(or all string in an ARRAY) are spliced into a string. -If the separator is NULL, return NULL. -The `concat_ws` function does not skip empty strings, it skips NULL values. 
- -### example - -``` -mysql> select concat_ws("or", "d", "is"); -+----------------------------+ -| concat_ws('or', 'd', 'is') | -+----------------------------+ -| doris | -+----------------------------+ - -mysql> select concat_ws(NULL, "d", "is"); -+----------------------------+ -| concat_ws(NULL, 'd', 'is') | -+----------------------------+ -| NULL | -+----------------------------+ - -mysql> select concat_ws("or", "d", NULL,"is"); -+---------------------------------+ -| concat_ws("or", "d", NULL,"is") | -+---------------------------------+ -| doris | -+---------------------------------+ - -mysql> select concat_ws("or", ["d", "is"]); -+-----------------------------------+ -| concat_ws('or', ARRAY('d', 'is')) | -+-----------------------------------+ -| doris | -+-----------------------------------+ - -mysql> select concat_ws(NULL, ["d", "is"]); -+-----------------------------------+ -| concat_ws(NULL, ARRAY('d', 'is')) | -+-----------------------------------+ -| NULL | -+-----------------------------------+ - -mysql> select concat_ws("or", ["d", NULL,"is"]); -+-----------------------------------------+ -| concat_ws('or', ARRAY('d', NULL, 'is')) | -+-----------------------------------------+ -| doris | -+-----------------------------------------+ -``` -### keywords - CONCAT_WS,CONCAT,WS,ARRAY diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/concat.md b/docs/en/docs/sql-manual/sql-functions/string-functions/concat.md deleted file mode 100644 index 7d0e54299ee604..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/concat.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "CONCAT", - "language": "en" -} ---- - - - -## concat -### Description -#### Syntax - -`VARCHAR concat (VARCHAR,...)` - - -Connect multiple strings and return NULL if any of the parameters is NULL - -### example - -``` -mysql> select concat("a", "b"); -+------------------+ -| concat('a', 'b') | -+------------------+ -| ab | -+------------------+ - -mysql> 
select concat("a", "b", "c"); -+-----------------------+ -| concat('a', 'b', 'c') | -+-----------------------+ -| abc | -+-----------------------+ - -mysql> select concat("a", null, "c"); -+------------------------+ -| concat('a', NULL, 'c') | -+------------------------+ -| NULL | -+------------------------+ -``` -### keywords - CONCAT diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/convert-to.md b/docs/en/docs/sql-manual/sql-functions/string-functions/convert-to.md deleted file mode 100644 index fc3c4b8cd56bd5..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/convert-to.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -{ - "title": "CONVERT_TO", - "language": "en" -} ---- - - - - - -## convert_to -### description -#### Syntax - -`VARCHAR convert_to(VARCHAR column, VARCHAR character)` - -It is used in the order by clause. eg: order by convert(column using gbk), Now only support character can be converted to 'gbk'. -Because when the order by column contains Chinese, it is not arranged in the order of Pinyin -After the character encoding of column is converted to gbk, it can be arranged according to pinyin - - - -### example - -``` -mysql> select * from class_test order by class_name; -+----------+------------+-------------+ -| class_id | class_name | student_ids | -+----------+------------+-------------+ -| 6 | asd | [6] | -| 7 | qwe | [7] | -| 8 | z | [8] | -| 2 | 哈 | [2] | -| 3 | 哦 | [3] | -| 1 | 啊 | [1] | -| 4 | 张 | [4] | -| 5 | 我 | [5] | -+----------+------------+-------------+ - -mysql> select * from class_test order by convert(class_name using gbk); -+----------+------------+-------------+ -| class_id | class_name | student_ids | -+----------+------------+-------------+ -| 6 | asd | [6] | -| 7 | qwe | [7] | -| 8 | z | [8] | -| 1 | 啊 | [1] | -| 2 | 哈 | [2] | -| 3 | 哦 | [3] | -| 5 | 我 | [5] | -| 4 | 张 | [4] | -+----------+------------+-------------+ -``` -### keywords - convert_to diff --git 
a/docs/en/docs/sql-manual/sql-functions/string-functions/elt.md b/docs/en/docs/sql-manual/sql-functions/string-functions/elt.md deleted file mode 100644 index 34ae915c02146e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/elt.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -{ - "title": "ELT", - "language": "en" -} ---- - - - -## elt -### Description -#### Syntax - -`VARCHAR elt(INT, VARCHAR,...)` - -Returns the string at specified index. Returns NULL if there is no string at specified index. - -### example - -``` -mysql> select elt(1, 'aaa', 'bbb'); -+----------------------+ -| elt(1, 'aaa', 'bbb') | -+----------------------+ -| aaa | -+----------------------+ -mysql> select elt(2, 'aaa', 'bbb'); -+-----------------------+ -| elt(2, 'aaa', 'bbb') | -+-----------------------+ -| bbb | -+-----------------------+ -mysql> select elt(0, 'aaa', 'bbb'); -+----------------------+ -| elt(0, 'aaa', 'bbb') | -+----------------------+ -| NULL | -+----------------------+ -mysql> select elt(2, 'aaa', 'bbb'); -+-----------------------+ -| elt(3, 'aaa', 'bbb') | -+-----------------------+ -| NULL | -+-----------------------+ -``` -### keywords - ELT diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/ends-with.md b/docs/en/docs/sql-manual/sql-functions/string-functions/ends-with.md deleted file mode 100644 index 2ebf8e09454f1f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/ends-with.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "ENDS_WITH", - "language": "en" -} ---- - - - -## ends_with -### Description -#### Syntax - -`BOOLEAN ENDS_WITH(VARCHAR str, VARCHAR suffix)` - -It returns true if the string ends with the specified suffix, otherwise it returns false. -If any parameter is NULL, it returns NULL. 
- -### example - -``` -mysql> select ends_with("Hello doris", "doris"); -+-----------------------------------+ -| ends_with('Hello doris', 'doris') | -+-----------------------------------+ -| 1 | -+-----------------------------------+ - -mysql> select ends_with("Hello doris", "Hello"); -+-----------------------------------+ -| ends_with('Hello doris', 'Hello') | -+-----------------------------------+ -| 0 | -+-----------------------------------+ -``` -### keywords - ENDS_WITH diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/esquery.md b/docs/en/docs/sql-manual/sql-functions/string-functions/esquery.md deleted file mode 100644 index ecf96f56f9e597..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/esquery.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -{ - "title": "ESQUERY", - "language": "en" -} ---- - - - -## esquery -### description -#### Syntax - -`boolean esquery(varchar field, varchar QueryDSL)` - -Use the esquery (field, QueryDSL) function to match queries that cannot be expressed in SQL are pushed down to Elasticsearch for filtering. -The first column name parameter of esquery is used to associate indexes, and the second parameter is the json expression of the basic query DSL of ES, which is contained in curly brackets {}. There is one and only one root key of json, such as match_phrase、geo_Shape, bool. 
- -### example - -``` -match_phrase SQL: - -select * from es_table where esquery(k4, '{ - "match_phrase": { - "k4": "doris on es" - } - }'); - - -geo SQL: - -select * from es_table where esquery(k4, '{ - "geo_shape": { - "location": { - "shape": { - "type": "envelope", - "coordinates": [ - [ - 13, - 53 - ], - [ - 14, - 52 - ] - ] - }, - "relation": "within" - } - } - }'); -``` - -### keywords - esquery diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/extract-url-parameter.md b/docs/en/docs/sql-manual/sql-functions/string-functions/extract-url-parameter.md deleted file mode 100644 index 31fae29fb5cfc0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/extract-url-parameter.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -{ -"title": "EXTRACT_URL_PARAMETER", -"language": "en" -} ---- - - - -## extract_url_parameter -### description -#### Syntax - -`VARCHAR extract_url_parameter(VARCHAR url, VARCHAR name)` - - -Returns the value of the "name" parameter in the URL, if present. Otherwise an empty string. -If there are many parameters with this name, the first occurrence is returned. -This function works assuming that the parameter name is encoded in the URL exactly as it was in the passed parameter. - -``` -mysql> SELECT extract_url_parameter ("http://doris.apache.org?k1=aa&k2=bb&test=cc#999", "k2"); -+--------------------------------------------------------------------------------+ -| extract_url_parameter('http://doris.apache.org?k1=aa&k2=bb&test=cc#999', 'k2') | -+--------------------------------------------------------------------------------+ -| bb | -+--------------------------------------------------------------------------------+ -``` - -If you want to get other part of URL, you can use [parse_url](./parse_url.md). 
- -### keywords - EXTRACT URL PARAMETER diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/field.md b/docs/en/docs/sql-manual/sql-functions/string-functions/field.md deleted file mode 100644 index 0b66032af35a89..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/field.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -{ - "title": "FIELD", - "language": "en" -} ---- - - - -## field - - - -field - - - -### description -#### Syntax - -`field(Expr e, param1, param2, param3,.....)` - - -In the order by clause, you can use custom sorting to arrange the data in expr in the specified param1, 2, and 3 order. -The data not in the param parameter will not participate in sorting, but will be placed first. -You can use asc and desc to control the overall order. -If there is a NULL value, you can use nulls first, nulls last to control the order of nulls. - - -### example - -``` -mysql> select k1,k7 from baseall where k1 in (1,2,3) order by field(k1,2,1,3); -+------+------------+ -| k1 | k7 | -+------+------------+ -| 2 | wangyu14 | -| 1 | wangjing04 | -| 3 | yuanyuan06 | -+------+------------+ - -mysql> select class_name from class_test order by field(class_name,'Suzi','Ben','Henry'); -+------------+ -| class_name | -+------------+ -| Suzi | -| Suzi | -| Ben | -| Ben | -| Henry | -| Henry | -+------------+ - -mysql> select class_name from class_test order by field(class_name,'Suzi','Ben','Henry') desc; -+------------+ -| class_name | -+------------+ -| Henry | -| Henry | -| Ben | -| Ben | -| Suzi | -| Suzi | -+------------+ - -mysql> select class_name from class_test order by field(class_name,'Suzi','Ben','Henry') nulls first; -+------------+ -| class_name | -+------------+ -| null | -| Suzi | -| Suzi | -| Ben | -| Ben | -| Henry | -| Henry | -+------------+ -``` -### keywords - field diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/find-in-set.md b/docs/en/docs/sql-manual/sql-functions/string-functions/find-in-set.md deleted 
file mode 100644 index 5a4e37c18d74ae..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/find-in-set.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "FIND_IN_SET", - "language": "en" -} ---- - - - -## find_in_set -### description -#### Syntax - -`INT find_in_set(VARCHAR str, VARCHAR strlist)` - -"NOT found in set (VARCHAR str., VARCHAR strlist)" - - -Return to the location where the str first appears in strlist (counting from 1). Strlist is a comma-separated string. If not, return 0. Any parameter is NULL, returning NULL. - -### example - -``` -mysql> select find_in_set("b", "a,b,c"); -+---------------------------+ -| find_in_set('b', 'a,b,c') | -+---------------------------+ -| 2 | -+---------------------------+ -``` -### keywords - FIND_IN_SET,FIND,IN,SET diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/from-base64.md b/docs/en/docs/sql-manual/sql-functions/string-functions/from-base64.md deleted file mode 100644 index ea6cfe3745a823..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/from-base64.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "FROM_BASE64", - "language": "en" -} ---- - - - -## from_base64 -### description -#### Syntax - -`VARCHAR from_base64(VARCHAR str)` - - -Returns the result of Base64 decoding the input string, NULL is returned when the input string is incorrect (with non-Base64 encoded characters). 
- -### example - -``` -mysql> select from_base64('MQ=='); -+---------------------+ -| from_base64('MQ==') | -+---------------------+ -| 1 | -+---------------------+ - -mysql> select from_base64('MjM0'); -+---------------------+ -| from_base64('MjM0') | -+---------------------+ -| 234 | -+---------------------+ -``` -### keywords - from_base64 diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/hex.md b/docs/en/docs/sql-manual/sql-functions/string-functions/hex.md deleted file mode 100644 index c8a5ec46edb977..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/hex.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -{ - "title": "HEX", - "language": "en" -} ---- - - - -## hex -### description -#### Syntax - -`VARCHAR hex(VARCHAR str)` - -`VARCHAR hex(BIGINT num)` - -If the input parameter is a number, the string representation of the hexadecimal value is returned; - -If the input parameter is a string, each character will be converted into two hexadecimal characters, and all the characters after the conversion will be spliced into a string for output - - -### example - -``` -input string - -mysql> select hex('1'); -+----------+ -| hex('1') | -+----------+ -| 31 | -+----------+ - -mysql> select hex('@'); -+----------+ -| hex('@') | -+----------+ -| 40 | -+----------+ - -mysql> select hex('12'); -+-----------+ -| hex('12') | -+-----------+ -| 3132 | -+-----------+ -``` - -``` -intput num - -mysql> select hex(12); -+---------+ -| hex(12) | -+---------+ -| C | -+---------+ - -mysql> select hex(-1); -+------------------+ -| hex(-1) | -+------------------+ -| FFFFFFFFFFFFFFFF | -+------------------+ -``` -### keywords - HEX diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/initcap.md b/docs/en/docs/sql-manual/sql-functions/string-functions/initcap.md deleted file mode 100644 index 4dc2c0e1d36ac1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/initcap.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ - 
"title": "INITCAP", - "language": "en" -} ---- - - - -## initcap -### description -#### Syntax - -`VARCHAR initcap(VARCHAR str)` - -Convert the first letter of each word to upper case and the rest to lower case. -Words are sequences of alphanumeric characters separated by non-alphanumeric characters. - -### example - -``` -mysql> select initcap('hello hello.,HELLO123HELlo'); -+---------------------------------------+ -| initcap('hello hello.,HELLO123HELlo') | -+---------------------------------------+ -| Hello Hello.,Hello123hello | -+---------------------------------------+ -``` -### keywords - INITCAP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/instr.md b/docs/en/docs/sql-manual/sql-functions/string-functions/instr.md deleted file mode 100644 index 1106dc5251f20a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/instr.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "INSTR", - "language": "en" -} ---- - - - -## instr -### Description -#### Syntax - -`INSTR (VARCHAR STR, VARCHAR substrate)` - - -Returns the location where substr first appeared in str (counting from 1). If substr does not appear in str, return 0. 
- -### example - -``` -mysql> select instr("abc", "b"); -+-------------------+ -| instr('abc', 'b') | -+-------------------+ -| 2 | -+-------------------+ - -mysql> select instr("abc", "d"); -+-------------------+ -| instr('abc', 'd') | -+-------------------+ -| 0 | -+-------------------+ -``` -### keywords - INSTR diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/lcase.md b/docs/en/docs/sql-manual/sql-functions/string-functions/lcase.md deleted file mode 100644 index 8cac4e92902268..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/lcase.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -{ - "title": "LCASE", - "language": "en" -} ---- - - - -## lcase -### Description -#### Syntax - -`INT lcase (VARCHAR str)` - -Convert all strings in parameters to lowercase. Another alias for this function is [lower](./lower.md). - -### keywords - LCASE diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/length.md b/docs/en/docs/sql-manual/sql-functions/string-functions/length.md deleted file mode 100644 index 723c01eaef583d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/length.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "LENGTH", - "language": "en" -} ---- - - - -## length -### Description -#### Syntax - -`INT length (VARCHAR str)` - - -Returns the length of the string in byte size. 
- -### example - -``` -mysql> select length("abc"); -+---------------+ -| length('abc') | -+---------------+ -| 3 | -+---------------+ - -mysql> select length("中国"); -+------------------+ -| length('中国') | -+------------------+ -| 6 | -+------------------+ -``` -### keywords - LENGTH diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/like/like.md b/docs/en/docs/sql-manual/sql-functions/string-functions/like/like.md deleted file mode 100644 index 3089ee27d5c4e9..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/like/like.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -{ - "title": "LIKE", - "language": "en" -} ---- - - - -## like -### description -#### syntax - -`BOOLEAN like(VARCHAR str, VARCHAR pattern)` - -Perform fuzzy matching on the string str, return true if it matches, and false if it doesn't match. - -like match/fuzzy match, will be used in combination with % and _. - -the percent sign ('%') represents zero, one, or more characters. - -the underscore ('_') represents a single character. 
- -``` -'a' // Precise matching, the same effect as `=` -'%a' // data ending with a -'a%' // data starting with a -'%a%' // data containing a -'_a_' // three digits and the middle letter is a -'_a' // two digits and the ending letter is a -'a_' // two digits and the initial letter is a -'a__b' // four digits, starting letter is a and ending letter is b -``` -### example - -``` -// table test -+-------+ -| k1 | -+-------+ -| b | -| bb | -| bab | -| a | -+-------+ - -// Return the data containing a in the k1 string -mysql> select k1 from test where k1 like '%a%'; -+-------+ -| k1 | -+-------+ -| a | -| bab | -+-------+ - -// Return the data equal to a in the k1 string -mysql> select k1 from test where k1 like 'a'; -+-------+ -| k1 | -+-------+ -| a | -+-------+ -``` - -### keywords - LIKE diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/like/not-like.md b/docs/en/docs/sql-manual/sql-functions/string-functions/like/not-like.md deleted file mode 100644 index 7f040041a80a3a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/like/not-like.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -{ - "title": "NOT LIKE", - "language": "en" -} ---- - - - -## not like -### description -#### syntax - -`BOOLEAN not like(VARCHAR str, VARCHAR pattern)` - -Perform fuzzy matching on the string str, return false if it matches, and return true if it doesn't match. - -like match/fuzzy match, will be used in combination with % and _. - -the percent sign ('%') represents zero, one, or more characters. - -the underscore ('_') represents a single character. 
- -``` -'a' // Precise matching, the same effect as `=` -'%a' // data ending with a -'a%' // data starting with a -'%a%' // data containing a -'_a_' // three digits and the middle letter is a -'_a' // two digits and the ending letter is a -'a_' // two digits and the initial letter is a -'a__b' // four digits, starting letter is a and ending letter is b -``` -### example - -``` -// table test -+-------+ -| k1 | -+-------+ -| b | -| bb | -| bab | -| a | -+-------+ - -// Return data that does not contain a in the k1 string -mysql> select k1 from test where k1 not like '%a%'; -+-------+ -| k1 | -+-------+ -| b | -| bb | -+-------+ - -// Return the data that is not equal to a in the k1 string -mysql> select k1 from test where k1 not like 'a'; -+-------+ -| k1 | -+-------+ -| b | -| bb | -| bab | -+-------+ -``` - -### keywords - LIKE, NOT, NOT LIKE diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/locate.md b/docs/en/docs/sql-manual/sql-functions/string-functions/locate.md deleted file mode 100644 index f3b53356a48163..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/locate.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "LOCATE", - "language": "en" -} ---- - - - -## locate -### Description -#### Syntax - -`INT LOCATION (VARCHAR substrate, VARCHAR str [, INT pos]]` - - -Returns where substr appears in str (counting from 1). If the third parameter POS is specified, the position where substr appears is found from the string where STR starts with POS subscript. 
If not found, return 0 - -### example - -``` -mysql> SELECT LOCATE('bar', 'foobarbar'); -+----------------------------+ -| locate('bar', 'foobarbar') | -+----------------------------+ -| 4 | -+----------------------------+ - -mysql> SELECT LOCATE('xbar', 'foobar'); -+--------------------------+ -| locate('xbar', 'foobar') | -+--------------------------+ -| 0 | -+--------------------------+ - -mysql> SELECT LOCATE('bar', 'foobarbar', 5); -+-------------------------------+ -| locate('bar', 'foobarbar', 5) | -+-------------------------------+ -| 7 | -+-------------------------------+ -``` -### keywords - LOCATE diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/lower.md b/docs/en/docs/sql-manual/sql-functions/string-functions/lower.md deleted file mode 100644 index ad72a5e7c73fd2..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/lower.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ - "title": "LOWER", - "language": "en" -} ---- - - - -## lower -### Description -#### Syntax - -`VARCHAR lower (VARCHAR str)` - - -Convert all strings in parameters to lowercase. Another alias for this function is [lcase](lcase.md). - -### example - -``` -mysql> SELECT lower("AbC123"); -+-----------------+ -| lower('AbC123') | -+-----------------+ -| abc123 | -+-----------------+ -``` -### keywords - LOWER diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/lpad.md b/docs/en/docs/sql-manual/sql-functions/string-functions/lpad.md deleted file mode 100644 index 5e2d9b95536300..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/lpad.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "LPAD", - "language": "en" -} ---- - - - -## lpad -### Description -#### Syntax - -`VARCHAR lpad (VARCHAR str, INT len, VARCHAR pad)` - - -Returns a string of length len in str, starting with the initials. If len is longer than str, pad characters are added to STR until the length of the string reaches len. 
If len is less than str's length, the function is equivalent to truncating STR strings and returning only strings of len's length. The len is character length not the bye size. - -### example - -``` -mysql> SELECT lpad("hi", 5, "xy"); -+---------------------+ -| lpad('hi', 5, 'xy') | -+---------------------+ -| xyxhi | -+---------------------+ - -mysql> SELECT lpad("hi", 1, "xy"); -+---------------------+ -| lpad('hi', 1, 'xy') | -+---------------------+ -| h | -+---------------------+ -``` -### keywords - LPAD diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/ltrim.md b/docs/en/docs/sql-manual/sql-functions/string-functions/ltrim.md deleted file mode 100644 index b219a7e75c2fe3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/ltrim.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "LTRIM", - "language": "en" -} ---- - - - -## ltrim -### Description -#### Syntax - -`VARCHAR ltrim(VARCHAR str[, VARCHAR rhs])` - - -When the 'rhs' parameter is not present, remove the continuous spaces that appear from the beginning of the 'str' parameter. Otherwise, remove 'rhs'. - -### example - -``` -mysql> SELECT ltrim(' ab d'); -+------------------+ -| ltrim(' ab d') | -+------------------+ -| ab d | -+------------------+ - -mysql> SELECT ltrim('ababccaab','ab') str; -+-------+ -| str | -+-------+ -| ccaab | -+-------+ -``` -### keywords - LTRIM diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/mask/mask-first-n.md b/docs/en/docs/sql-manual/sql-functions/string-functions/mask/mask-first-n.md deleted file mode 100644 index 1c78ef231c3806..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/mask/mask-first-n.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -{ - "title": "MASK_FIRST_N", - "language": "en" -} ---- - - - -## mask_first_n -### description -#### syntax - -`VARCHAR mask_first_n(VARCHAR str[, INT n])` - -Returns a masked version of str with the first n values masked. 
Upper case letters are converted to "X", lower case letters are converted to "x" and numbers are converted to "n". For example, mask_first_n("1234-5678-8765-4321", 4) results in nnnn-5678-8765-4321. - -### example - -``` -// table test -+-----------+ -| name | -+-----------+ -| abc123EFG | -| NULL | -| 456AbCdEf | -+-----------+ - -mysql> select mask_first_n(name, 5) from test; -+-------------------------+ -| mask_first_n(`name`, 5) | -+-------------------------+ -| xxxnn3EFG | -| NULL | -| nnnXxCdEf | -+-------------------------+ -``` - -### keywords - mask_first_n diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/mask/mask-last-n.md b/docs/en/docs/sql-manual/sql-functions/string-functions/mask/mask-last-n.md deleted file mode 100644 index e0a740fbecb632..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/mask/mask-last-n.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -{ - "title": "MASK_LAST_N", - "language": "en" -} ---- - - - -## mask_last_n -### description -#### syntax - -`VARCHAR mask_last_n(VARCHAR str[, INT n])` - -Returns a masked version of str with the last n values masked. Upper case letters are converted to "X", lower case letters are converted to "x" and numbers are converted to "n". For example, mask_last_n("1234-5678-8765-4321", 4) results in 1234-5678-8765-nnnn. 
- -### example - -``` -// table test -+-----------+ -| name | -+-----------+ -| abc123EFG | -| NULL | -| 456AbCdEf | -+-----------+ - -mysql> select mask_last_n(name, 5) from test; -+------------------------+ -| mask_last_n(`name`, 5) | -+------------------------+ -| abc1nnXXX | -| NULL | -| 456AxXxXx | -+------------------------+ -``` - -### keywords - mask_last_n diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/mask/mask.md b/docs/en/docs/sql-manual/sql-functions/string-functions/mask/mask.md deleted file mode 100644 index 3b0a66a11b3865..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/mask/mask.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -{ - "title": "MASK", - "language": "en" -} ---- - - - -## mask -### description -#### syntax - -`VARCHAR mask(VARCHAR str[, VARCHAR upper[, VARCHAR lower[, VARCHAR number]]])` - -Returns a masked version of str . By default, upper case letters are converted to "X", lower case letters are converted to "x" and numbers are converted to "n". For example mask("abcd-EFGH-8765-4321") results in xxxx-XXXX-nnnn-nnnn. You can override the characters used in the mask by supplying additional arguments: the second argument controls the mask character for upper case letters, the third argument for lower case letters and the fourth argument for numbers. For example, mask("abcd-EFGH-8765-4321", "U", "l", "#") results in llll-UUUU-####-####. 
- -### example - -``` -// table test -+-----------+ -| name | -+-----------+ -| abc123EFG | -| NULL | -| 456AbCdEf | -+-----------+ - -mysql> select mask(name) from test; -+--------------+ -| mask(`name`) | -+--------------+ -| xxxnnnXXX | -| NULL | -| nnnXxXxXx | -+--------------+ - -mysql> select mask(name, '*', '#', '$') from test; -+-----------------------------+ -| mask(`name`, '*', '#', '$') | -+-----------------------------+ -| ###$$$*** | -| NULL | -| $$$*#*#*# | -+-----------------------------+ -``` - -### keywords - mask diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/money-format.md b/docs/en/docs/sql-manual/sql-functions/string-functions/money-format.md deleted file mode 100644 index 629290b6e77983..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/money-format.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "MONEY_FORMAT", - "language": "en" -} ---- - - - -## money_format -### Description -#### Syntax - -`VARCHAR money format (Number)` - - -The number is output in currency format, the integer part is separated by commas every three bits, and the decimal part is reserved for two bits. 
- -### example - -``` -mysql> select money_format(17014116); -+------------------------+ -| money_format(17014116) | -+------------------------+ -| 17,014,116.00 | -+------------------------+ - -mysql> select money_format(1123.456); -+------------------------+ -| money_format(1123.456) | -+------------------------+ -| 1,123.46 | -+------------------------+ - -mysql> select money_format(1123.4); -+----------------------+ -| money_format(1123.4) | -+----------------------+ -| 1,123.40 | -+----------------------+ -``` -### keywords - MONEY_FORMAT,MONEY,FORMAT diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/not-null-or-empty.md b/docs/en/docs/sql-manual/sql-functions/string-functions/not-null-or-empty.md deleted file mode 100644 index 08f6b56b604749..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/not-null-or-empty.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "NOT_NULL_OR_EMPTY", - "language": "en" -} ---- - - - -## not_null_or_empty -### description -#### Syntax - -`BOOLEAN NOT_NULL_OR_EMPTY (VARCHAR str)` - -It returns false if the string is an empty string or NULL. Otherwise it returns true. 
- -### example - -``` -MySQL [(none)]> select not_null_or_empty(null); -+-------------------------+ -| not_null_or_empty(NULL) | -+-------------------------+ -| 0 | -+-------------------------+ - -MySQL [(none)]> select not_null_or_empty(""); -+-----------------------+ -| not_null_or_empty('') | -+-----------------------+ -| 0 | -+-----------------------+ - -MySQL [(none)]> select not_null_or_empty("a"); -+------------------------+ -| not_null_or_empty('a') | -+------------------------+ -| 1 | -+------------------------+ -``` -### keywords - NOT_NULL_OR_EMPTY diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/null-or-empty.md b/docs/en/docs/sql-manual/sql-functions/string-functions/null-or-empty.md deleted file mode 100644 index 87c60cee3b3ab3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/null-or-empty.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "NULL_OR_EMPTY", - "language": "en" -} ---- - - - -## null_or_empty -### description -#### Syntax - -`BOOLEAN NULL_OR_EMPTY (VARCHAR str)` - -It returns true if the string is an empty string or NULL. Otherwise it returns false. 
- -### example - -``` -MySQL [(none)]> select null_or_empty(null); -+---------------------+ -| null_or_empty(NULL) | -+---------------------+ -| 1 | -+---------------------+ - -MySQL [(none)]> select null_or_empty(""); -+-------------------+ -| null_or_empty('') | -+-------------------+ -| 1 | -+-------------------+ - -MySQL [(none)]> select null_or_empty("a"); -+--------------------+ -| null_or_empty('a') | -+--------------------+ -| 0 | -+--------------------+ -``` -### keywords - NULL_OR_EMPTY diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/parse-url.md b/docs/en/docs/sql-manual/sql-functions/string-functions/parse-url.md deleted file mode 100644 index 68523da23ffa9d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/parse-url.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -{ - "title": "PARSE_URL", - "language": "en" -} ---- - - - -## parse_url -### description -#### Syntax - -`VARCHAR parse_url(VARCHAR url, VARCHAR name)` - - -From the URL, the field corresponding to name is resolved. The name options are as follows: 'PROTOCOL', 'HOST', 'PATH', 'REF', 'AUTHORITY', 'FILE', 'USERINFO', 'PORT', 'QUERY', and the result is returned. - -### example - -``` -mysql> SELECT parse_url ('https://doris.apache.org/', 'HOST'); -+------------------------------------------------+ -| parse_url('https://doris.apache.org/', 'HOST') | -+------------------------------------------------+ -| doris.apache.org | -+------------------------------------------------+ -``` - -If you want to get parameter in QUERY, you can use [extract_url_parameter](./extract_url_parameter.md). 
- -### keywords - PARSE URL diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/random_bytes.md b/docs/en/docs/sql-manual/sql-functions/string-functions/random_bytes.md deleted file mode 100644 index 0e54641d293868..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/random_bytes.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "random_bytes", - "language": "en" -} ---- - - - -## random_bytes -### description - -The `random_bytes` function generates a sequence of random bytes. - -#### Syntax - -```sql -VARCHAR random_bytes(INT len) -``` - -### Parameters - -- len: The `random_bytes` function takes a single argument, which specifies the length of the generated random byte sequence. - -### example - -``` -mysql> select random_bytes(7); -+------------------------------------------------+ -| random_bytes(7) | -+------------------------------------------------+ -| 0x53edd97401fb6d | -+------------------------------------------------+ -``` - -### keywords - RANDOM BYTES diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/not-regexp.md b/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/not-regexp.md deleted file mode 100644 index facb912871a168..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/not-regexp.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "NOT REGEXP", - "language": "en" -} ---- - - - -## not regexp -### description -#### syntax - -`BOOLEAN not regexp(VARCHAR str, VARCHAR pattern)` - -Perform regular matching on the string str, return false if it matches, and return true if it doesn't match. pattern is a regular expression. 
- -### example - -``` -// Find all data in the k1 field that does not start with 'billie' -mysql> select k1 from test where k1 not regexp '^billie'; -+--------------------+ -| k1 | -+--------------------+ -| Emmy eillish | -+--------------------+ - -// Find all the data in the k1 field that does not end with 'ok': -mysql> select k1 from test where k1 not regexp 'ok$'; -+------------+ -| k1 | -+------------+ -| It's true | -+------------+ -``` - -### keywords - REGEXP, NOT, NOT REGEXP diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-extract-all.md b/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-extract-all.md deleted file mode 100644 index a7788de8cd7bcf..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-extract-all.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "REGEXP_EXTRACT_ALL", - "language": "en" -} ---- - - - -## regexp_extract_all -### Description -#### Syntax - -`VARCHAR regexp_extract_all (VARCHAR str, VARCHAR pattern)` - -Regularly matches a string str and extracts the first sub-pattern matching part of pattern. The pattern needs to exactly match a part of str in order to return an array of strings for the part of the pattern that needs to be matched. If there is no match or the pattern has no sub-pattern, the empty string is returned. 
- -### example - -``` -mysql> SELECT regexp_extract_all('AbCdE', '([[:lower:]]+)C([[:lower:]]+)'); -+--------------------------------------------------------------+ -| regexp_extract_all('AbCdE', '([[:lower:]]+)C([[:lower:]]+)') | -+--------------------------------------------------------------+ -| ['b'] | -+--------------------------------------------------------------+ - -mysql> SELECT regexp_extract_all('AbCdEfCg', '([[:lower:]]+)C([[:lower:]]+)'); -+-----------------------------------------------------------------+ -| regexp_extract_all('AbCdEfCg', '([[:lower:]]+)C([[:lower:]]+)') | -+-----------------------------------------------------------------+ -| ['b','f'] | -+-----------------------------------------------------------------+ - -mysql> SELECT regexp_extract_all('abc=111, def=222, ghi=333','("[^"]+"|\\w+)=("[^"]+"|\\w+)'); -+--------------------------------------------------------------------------------+ -| regexp_extract_all('abc=111, def=222, ghi=333', '("[^"]+"|\w+)=("[^"]+"|\w+)') | -+--------------------------------------------------------------------------------+ -| ['abc','def','ghi'] | -+--------------------------------------------------------------------------------+ -``` - -### keywords - REGEXP_EXTRACT_ALL,REGEXP,EXTRACT,ALL diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-extract.md b/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-extract.md deleted file mode 100644 index 9c567db6769094..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-extract.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "REGEXP_EXTRACT", - "language": "en" -} ---- - - - -## regexp_extract -### Description -#### Syntax - -`VARCHAR regexp_extract (VARCHAR str, VARCHAR pattern, int pos)` - - -The string STR is matched regularly and the POS matching part which conforms to pattern is extracted. 
Patterns need to match exactly some part of the STR to return to the matching part of the pattern. If there is no match, return an empty string. - -### example - -``` -mysql> SELECT regexp_extract('AbCdE', '([[:lower:]]+)C([[:lower:]]+)', 1); -+-------------------------------------------------------------+ -| regexp_extract('AbCdE', '([[:lower:]]+)C([[:lower:]]+)', 1) | -+-------------------------------------------------------------+ -| b | -+-------------------------------------------------------------+ - -mysql> SELECT regexp_extract('AbCdE', '([[:lower:]]+)C([[:lower:]]+)', 2); -+-------------------------------------------------------------+ -| regexp_extract('AbCdE', '([[:lower:]]+)C([[:lower:]]+)', 2) | -+-------------------------------------------------------------+ -| d | -+-------------------------------------------------------------+ -``` -### keywords - REGEXP_EXTRACT,REGEXP,EXTRACT diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-replace-one.md b/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-replace-one.md deleted file mode 100644 index 8344122e4a93b6..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-replace-one.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "REGEXP_REPLACE_ONE", - "language": "en" -} ---- - - - -## regexp_replace_one -### description -#### Syntax - -`VARCHAR regexp_replace_one(VARCHAR str, VARCHAR pattern, VARCHAR repl)` - - -Regular matching of STR strings, replacing the part hitting pattern with repl, replacing only the first match. 
- -### example - -``` -mysql> SELECT regexp_replace_one('a b c', " ", "-"); -+-----------------------------------+ -| regexp_replace_one('a b c', ' ', '-') | -+-----------------------------------+ -| a-b c | -+-----------------------------------+ - -mysql> SELECT regexp_replace_one('a b b','(b)','<\\1>'); -+----------------------------------------+ -| regexp_replace_one('a b b', '(b)', '<\1>') | -+----------------------------------------+ -| a b | -+----------------------------------------+ -``` -### keywords - REGEXP_REPLACE_ONE,REGEXP,REPLACE,ONE diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-replace.md b/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-replace.md deleted file mode 100644 index 1d5dae96bb64d0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp-replace.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "REGEXP_REPLACE", - "language": "en" -} ---- - - - -## regexp_replace -### description -#### Syntax - -`VARCHAR regexp_replace(VARCHAR str, VARCHAR pattern, VARCHAR repl)` - - -Regular matching of STR strings, replacing the part hitting pattern with repl - -### example - -``` -mysql> SELECT regexp_replace('a b c', " ", "-"); -+-----------------------------------+ -| regexp_replace('a b c', ' ', '-') | -+-----------------------------------+ -| a-b-c | -+-----------------------------------+ - -mysql> SELECT regexp_replace('a b c','(b)','<\\1>'); -+----------------------------------------+ -| regexp_replace('a b c', '(b)', '<\1>') | -+----------------------------------------+ -| a c | -+----------------------------------------+ -``` -### keywords - REGEXP_REPLACE,REGEXP,REPLACE diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp.md b/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp.md deleted file mode 100644 index 3f1b362268c674..00000000000000 --- 
a/docs/en/docs/sql-manual/sql-functions/string-functions/regexp/regexp.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "REGEXP", - "language": "en" -} ---- - - - -## regexp -### description -#### syntax - -`BOOLEAN regexp(VARCHAR str, VARCHAR pattern)` - -Perform regular matching on the string str, return true if it matches, and return false if it doesn't match. pattern is a regular expression. - -### example - -``` -// Find all data starting with 'billie' in the k1 field -mysql> select k1 from test where k1 regexp '^billie'; -+--------------------+ -| k1 | -+--------------------+ -| billie eillish | -+--------------------+ - -// Find all data ending with 'ok' in the k1 field: -mysql> select k1 from test where k1 regexp 'ok$'; -+----------+ -| k1 | -+----------+ -| It's ok | -+----------+ -``` - -### keywords - REGEXP diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/repeat.md b/docs/en/docs/sql-manual/sql-functions/string-functions/repeat.md deleted file mode 100644 index ca45d6d4577d62..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/repeat.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "REPEAT", - "language": "en" -} ---- - - - -## repeat -### Description -#### Syntax - -`VARCHAR repeat (VARCHAR str, INT count)` - - -Repeat the str of the string count times, return empty string when count is less than 1, return NULL when str, count is any NULL - -:::tip -It can be repeated up to 10000 times by default, you can adjust session variable to change it -``` -set repeat_max_num = 20000 -``` -::: - -### example - -``` -mysql> SELECT repeat("a", 3); -+----------------+ -| repeat('a', 3) | -+----------------+ -| aaa | -+----------------+ - -mysql> SELECT repeat("a", -1); -+-----------------+ -| repeat('a', -1) | -+-----------------+ -| | -+-----------------+ -``` -### keywords - REPEAT diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/replace.md 
b/docs/en/docs/sql-manual/sql-functions/string-functions/replace.md deleted file mode 100644 index 20e89f2ef008bb..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/replace.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ - "title": "REPLACE", - "language": "en" -} ---- - - - -## replace -### description -#### Syntax - -`VARCHAR REPLACE (VARCHAR str, VARCHAR old, VARCHAR new)` - -replace all old substring with new substring in str - -### example - -``` -mysql> select replace("http://www.baidu.com:9090", "9090", ""); -+------------------------------------------------------+ -| replace('http://www.baidu.com:9090', '9090', '') | -+------------------------------------------------------+ -| http://www.baidu.com: | -+------------------------------------------------------+ -``` -### keywords - REPLACE diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/reverse.md b/docs/en/docs/sql-manual/sql-functions/string-functions/reverse.md deleted file mode 100644 index 12e660fe39f3b7..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/reverse.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -{ - "title": "REVERSE", - "language": "en" -} ---- - - - -## reverse -### description -#### Syntax - -```sql -VARCHAR reverse(VARCHAR str) -ARRAY reverse(ARRAY arr) -``` - -The REVERSE() function reverses a string or array and returns the result. 
- -### notice - -`For the array type, only supported in vectorized engine` - -### example - -``` -mysql> SELECT REVERSE('hello'); -+------------------+ -| REVERSE('hello') | -+------------------+ -| olleh | -+------------------+ -1 row in set (0.00 sec) - -mysql> SELECT REVERSE('你好'); -+------------------+ -| REVERSE('你好') | -+------------------+ -| 好你 | -+------------------+ -1 row in set (0.00 sec) - -mysql> set enable_vectorized_engine=true; - -mysql> select k1, k2, reverse(k2) from array_test order by k1; -+------+-----------------------------+-----------------------------+ -| k1 | k2 | reverse(`k2`) | -+------+-----------------------------+-----------------------------+ -| 1 | [1, 2, 3, 4, 5] | [5, 4, 3, 2, 1] | -| 2 | [6, 7, 8] | [8, 7, 6] | -| 3 | [] | [] | -| 4 | NULL | NULL | -| 5 | [1, 2, 3, 4, 5, 4, 3, 2, 1] | [1, 2, 3, 4, 5, 4, 3, 2, 1] | -| 6 | [1, 2, 3, NULL] | [NULL, 3, 2, 1] | -| 7 | [4, 5, 6, NULL, NULL] | [NULL, NULL, 6, 5, 4] | -+------+-----------------------------+-----------------------------+ - -mysql> select k1, k2, reverse(k2) from array_test01 order by k1; -+------+-----------------------------------+-----------------------------------+ -| k1 | k2 | reverse(`k2`) | -+------+-----------------------------------+-----------------------------------+ -| 1 | ['a', 'b', 'c', 'd'] | ['d', 'c', 'b', 'a'] | -| 2 | ['e', 'f', 'g', 'h'] | ['h', 'g', 'f', 'e'] | -| 3 | [NULL, 'a', NULL, 'b', NULL, 'c'] | ['c', NULL, 'b', NULL, 'a', NULL] | -| 4 | ['d', 'e', NULL, ' '] | [' ', NULL, 'e', 'd'] | -| 5 | [' ', NULL, 'f', 'g'] | ['g', 'f', NULL, ' '] | -+------+-----------------------------------+-----------------------------------+ -``` -### keywords - REVERSE, ARRAY diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/rpad.md b/docs/en/docs/sql-manual/sql-functions/string-functions/rpad.md deleted file mode 100644 index f5ffb5bae4d60f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/rpad.md +++ /dev/null @@ -1,54 
+0,0 @@ ---- -{ - "title": "RPAD", - "language": "en" -} ---- - - - -## rpad -### Description -#### Syntax - -`VARCHAR rpad (VARCHAR str, INT len, VARCHAR pad)` - - -Returns a string of length len in str, starting with the initials. If len is longer than str, pad characters are added to the right of STR until the length of the string reaches len. If len is less than str's length, the function is equivalent to truncating STR strings and returning only strings of len's length. The len is character length not the bye size. - -### example - -``` -mysql> SELECT rpad("hi", 5, "xy"); -+---------------------+ -| rpad('hi', 5, 'xy') | -+---------------------+ -| hixyx | -+---------------------+ - -mysql> SELECT rpad("hi", 1, "xy"); -+---------------------+ -| rpad('hi', 1, 'xy') | -+---------------------+ -| h | -+---------------------+ -``` -### keywords - RPAD diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/rtrim.md b/docs/en/docs/sql-manual/sql-functions/string-functions/rtrim.md deleted file mode 100644 index 55a23b2122cc04..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/rtrim.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "RTRIM", - "language": "en" -} ---- - - - -## rtrim -### description -#### Syntax - -`VARCHAR rtrim(VARCHAR str[, VARCHAR rhs])` - - -When the 'rhs' parameter is not present, remove the continuous spaces that appear from the ending of the 'str' parameter. Otherwise, remove 'rhs'. 
- -### example - -``` -mysql> SELECT rtrim('ab d ') str; -+------+ -| str | -+------+ -| ab d | -+------+ - -mysql> SELECT rtrim('ababccaab','ab') str; -+---------+ -| str | -+---------+ -| ababcca | -+---------+ -``` -### keywords - RTRIM diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/search/multi-match-any.md b/docs/en/docs/sql-manual/sql-functions/string-functions/search/multi-match-any.md deleted file mode 100644 index 543f935f36509c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/search/multi-match-any.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "MULTI_MATCH_ANY", - "language": "en" -} ---- - - - -## multi_match_any -### Description -#### Syntax - -`TINYINT multi_match_any(VARCHAR haystack, ARRAY patterns)` - - -Checks whether the string `haystack` matches the regular expressions `patterns` in re2 syntax. returns 0 if none of the regular expressions are matched and 1 if any of the patterns matches. - -### example - -``` -mysql> select multi_match_any('Hello, World!', ['hello', '!', 'world']); -+-----------------------------------------------------------+ -| multi_match_any('Hello, World!', ['hello', '!', 'world']) | -+-----------------------------------------------------------+ -| 1 | -+-----------------------------------------------------------+ - -mysql> select multi_match_any('abc', ['A', 'bcd']); -+--------------------------------------+ -| multi_match_any('abc', ['A', 'bcd']) | -+--------------------------------------+ -| 0 | -+--------------------------------------+ -``` -### keywords - MULTI_MATCH,MATCH,ANY diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/search/multi-search-all-positions.md b/docs/en/docs/sql-manual/sql-functions/string-functions/search/multi-search-all-positions.md deleted file mode 100644 index c2d72c41d0e400..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/search/multi-search-all-positions.md +++ /dev/null @@ -1,54 +0,0 @@ ---- 
-{ - "title": "MULTI_SEARCH_ALL_POSITIONS", - "language": "en" -} ---- - - - -## multi_search_all_positions -### Description -#### Syntax - -`ARRAY multi_search_all_positions(VARCHAR haystack, ARRAY needles)` - -Returns an `ARRAY` where the `i`-th element is the position of the `i`-th element in `needles`(i.e. `needle`)'s **first** occurrence in the string `haystack`. Positions are counted from 1, with 0 meaning the element was not found. **Case-sensitive**. - -### example - -``` -mysql> select multi_search_all_positions('Hello, World!', ['hello', '!', 'world']); -+----------------------------------------------------------------------+ -| multi_search_all_positions('Hello, World!', ['hello', '!', 'world']) | -+----------------------------------------------------------------------+ -| [0,13,0] | -+----------------------------------------------------------------------+ - -select multi_search_all_positions("Hello, World!", ['hello', '!', 'world', 'Hello', 'World']); -+---------------------------------------------------------------------------------------------+ -| multi_search_all_positions('Hello, World!', ARRAY('hello', '!', 'world', 'Hello', 'World')) | -+---------------------------------------------------------------------------------------------+ -| [0, 13, 0, 1, 8] | -+---------------------------------------------------------------------------------------------+ -``` - -### keywords - MULTI_SEARCH,SEARCH,POSITIONS diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/sleep.md b/docs/en/docs/sql-manual/sql-functions/string-functions/sleep.md deleted file mode 100644 index 4aa6b16beb8373..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/sleep.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "SLEEP", - "language": "en" -} ---- - - - -## sleep -### Description -#### Syntax - -`BOOLEAN sleep(INT num)` - -Make the thread sleep for num seconds. 
- -### example - -``` -mysql> select sleep(10); -+-----------+ -| sleep(10) | -+-----------+ -| 1 | -+-----------+ -1 row in set (10.01 sec) - -``` -### keywords - sleep diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/space.md b/docs/en/docs/sql-manual/sql-functions/string-functions/space.md deleted file mode 100644 index c2922ef8c3cccd..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/space.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "SPACE", - "language": "en" -} ---- - - - -## space -### Description -#### Syntax - -`VARCHAR space(Int num)` - -Returns a string consisting of num spaces. - -### example - -``` -mysql> select length(space(10)); -+-------------------+ -| length(space(10)) | -+-------------------+ -| 10 | -+-------------------+ -1 row in set (0.01 sec) - -mysql> select length(space(-10)); -+--------------------+ -| length(space(-10)) | -+--------------------+ -| 0 | -+--------------------+ -1 row in set (0.02 sec) -``` -### keywords - space diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/split-by-string.md b/docs/en/docs/sql-manual/sql-functions/string-functions/split-by-string.md deleted file mode 100644 index 1a7bd0077169fd..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/split-by-string.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -{ - "title": "SPLIT_BY_STRING", - "language": "en" -} ---- - - - -## split_by_string - - - - -### description - -#### Syntax - -`ARRAY split_by_string(STRING s, STRING separator)` - -Splits a string into substrings separated by a string. It uses a constant string separator of multiple characters as the separator. If the string separator is empty, it will split the string s into an array of single characters. - -#### Arguments -`separator` — The separator. Type: `String` - -`s` — The string to split. Type: `String` - -#### Returned value(s) - -Returns an array of selected substrings. 
Empty substrings may be selected when: - -A non-empty separator occurs at the beginning or end of the string; - -There are multiple consecutive separators; - -The original string s is empty. - -Type: `Array(String)` - -### notice - -`Only supported in vectorized engine` - -### example - -``` -SELECT split_by_string('1, 2 3, 4,5, abcde', ', '); -select split_by_string('a1b1c1d','1'); -+---------------------------------+ -| split_by_string('a1b1c1d', '1') | -+---------------------------------+ -| ['a', 'b', 'c', 'd'] | -+---------------------------------+ - -select split_by_string(',,a,b,c,',','); -+----------------------------------+ -| split_by_string(',,a,b,c,', ',') | -+----------------------------------+ -| ['', '', 'a', 'b', 'c', ''] | -+----------------------------------+ - -SELECT split_by_string(NULL,','); -+----------------------------+ -| split_by_string(NULL, ',') | -+----------------------------+ -| NULL | -+----------------------------+ - -select split_by_string('a,b,c,abcde',','); -+-------------------------------------+ -| split_by_string('a,b,c,abcde', ',') | -+-------------------------------------+ -| ['a', 'b', 'c', 'abcde'] | -+-------------------------------------+ - -select split_by_string('1,,2,3,,4,5,,abcde', ',,'); -+---------------------------------------------+ -| split_by_string('1,,2,3,,4,5,,abcde', ',,') | -+---------------------------------------------+ -| ['1', '2,3', '4,5', 'abcde'] | -+---------------------------------------------+ - -select split_by_string(',,,,',',,'); -+-------------------------------+ -| split_by_string(',,,,', ',,') | -+-------------------------------+ -| ['', '', ''] | -+-------------------------------+ - -select split_by_string(',,a,,b,,c,,',',,'); -+--------------------------------------+ -| split_by_string(',,a,,b,,c,,', ',,') | -+--------------------------------------+ -| ['', 'a', 'b', 'c', ''] | -+--------------------------------------+ -``` -### keywords - -SPLIT_BY_STRING,SPLIT \ No newline at end of 
file diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/split-part.md b/docs/en/docs/sql-manual/sql-functions/string-functions/split-part.md deleted file mode 100644 index 4aa334c794c22c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/split-part.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -{ - "title": "SPLIT_PART", - "language": "en" -} ---- - - - -## split_part -### Description -#### Syntax - -`VARCHAR split_part(VARCHAR content, VARCHAR delimiter, INT field)` - - -Returns the specified partition by splitting the string according to the delimiter. If field is positive, splitting and counting from the beginning of content, otherwise from the ending. - -`delimiter` and `field` parameter should be constant. - -### example - -``` -mysql> select split_part("hello world", " ", 1); -+----------------------------------+ -| split_part('hello world', ' ', 1) | -+----------------------------------+ -| hello | -+----------------------------------+ - - -mysql> select split_part("hello world", " ", 2); -+----------------------------------+ -| split_part('hello world', ' ', 2) | -+----------------------------------+ -| world | -+----------------------------------+ - -mysql> select split_part("2019年7月8号", "月", 1); -+-----------------------------------------+ -| split_part('2019年7月8号', '月', 1) | -+-----------------------------------------+ -| 2019年7 | -+-----------------------------------------+ - -mysql> select split_part("abca", "a", 1); -+----------------------------+ -| split_part('abca', 'a', 1) | -+----------------------------+ -| | -+----------------------------+ - -mysql> select split_part("prefix_string", "_", -1); -+--------------------------------------+ -| split_part('prefix_string', '_', -1) | -+--------------------------------------+ -| string | -+--------------------------------------+ - - -mysql> select split_part("prefix_string", "_", -2); -+--------------------------------------+ -| split_part('prefix_string', '_', -2) | 
-+--------------------------------------+ -| prefix | -+--------------------------------------+ - -mysql> select split_part("abc##123###234", "##", -1); -+----------------------------------------+ -| split_part('abc##123###234', '##', -1) | -+----------------------------------------+ -| 234 | -+----------------------------------------+ - -mysql> select split_part("abc##123###234", "##", -2); -+----------------------------------------+ -| split_part('abc##123###234', '##', -2) | -+----------------------------------------+ -| 123# | -+----------------------------------------+ -``` -### keywords - SPLIT_PART,SPLIT,PART diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/starts-with.md b/docs/en/docs/sql-manual/sql-functions/string-functions/starts-with.md deleted file mode 100644 index 660680730ecd38..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/starts-with.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "STARTS_WITH", - "language": "en" -} ---- - - - -## starts_with -### Description -#### Syntax - -`BOOLEAN STARTS_WITH(VARCHAR str, VARCHAR prefix)` - -It returns true if the string starts with the specified prefix, otherwise it returns false. -If any parameter is NULL, it returns NULL. 
- -### example - -``` -MySQL [(none)]> select starts_with("hello world","hello"); -+-------------------------------------+ -| starts_with('hello world', 'hello') | -+-------------------------------------+ -| 1 | -+-------------------------------------+ - -MySQL [(none)]> select starts_with("hello world","world"); -+-------------------------------------+ -| starts_with('hello world', 'world') | -+-------------------------------------+ -| 0 | -+-------------------------------------+ -``` -### keywords - STARTS_WITH diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/strleft.md b/docs/en/docs/sql-manual/sql-functions/string-functions/strleft.md deleted file mode 100644 index 42980c4fc4f88c..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/strleft.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "STRLEFT", - "language": "en" -} ---- - - - -## strleft -### Description -#### Syntax - -`VARCHAR STRLEFT (VARCHAR str, INT len)` - - -It returns the left part of a string of specified length, length is char length not the byte size. Another alias for this function is `left`. -If the function parameters contain a NULL value, the function will always return NULL. If the integer parameter is less than or equal to 0, it will return an empty value. 
- -### example - -``` -mysql> select strleft("Hello doris",5); -+------------------------+ -| strleft('Hello doris', 5) | -+------------------------+ -| Hello | -+------------------------+ -mysql> select strleft("Hello doris",-5); -+----------------------------+ -| strleft('Hello doris', -5) | -+----------------------------+ -| | -+----------------------------+ -mysql> select strleft("Hello doris",NULL); -+------------------------------+ -| strleft('Hello doris', NULL) | -+------------------------------+ -| NULL | -+------------------------------+ -mysql> select strleft(NULL,3); -+------------------+ -| strleft(NULL, 3) | -+------------------+ -| NULL | -+------------------+ -``` -### keywords - STRLEFT, LEFT \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/strright.md b/docs/en/docs/sql-manual/sql-functions/string-functions/strright.md deleted file mode 100644 index 6ba2181b9eef6b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/strright.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -{ - "title": "STRRIGHT", - "language": "en" -} ---- - - - - -## strright -### Description -#### Syntax - -`VARCHAR strright (VARCHAR str, INT len)` - - -It returns the right part of a string of specified length, length is char length not the byte size. Another alias for this function is `right`. -If the parameters contain a NULL value, the function will always return NULL. If the integer parameter is negative, the function will retrieve the portion of the string starting from the left and moving to the right, beginning at the absolute value of len. 
- -### example - -``` -mysql> select strright("Hello doris",5); -+-------------------------+ -| strright('Hello doris', 5) | -+-------------------------+ -| doris | -+-------------------------+ -mysql> select strright("Hello doris",-7); -+--------------------------+ -| strright('Hello doris', -7) | -+--------------------------+ -| doris | -+--------------------------+ -mysql> select strright("Hello doris",NULL); -+----------------------------+ -| strright('Hello doris', NULL) | -+----------------------------+ -| NULL | -+----------------------------+ -mysql> select strright(NULL,5); -+----------------+ -| strright(NULL, 5) | -+----------------+ -| NULL | -+----------------+ -``` -### keywords - STRRIGHT, RIGHT \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/sub-replace.md b/docs/en/docs/sql-manual/sql-functions/string-functions/sub-replace.md deleted file mode 100644 index 75b50211b488e3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/sub-replace.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ -"title": "SUB_REPLACE", -"language": "en" -} ---- - - - -## sub_replace -### Description -#### Syntax - -`VARCHAR sub_replace(VARCHAR str, VARCHAR new_str, INT start[, INT len])` - -Return with new_str replaces the str with length and starting position from start. -When start and len are negative integers, return NULL. -and the default value of len is the length of new_str. 
- -### example - -``` -mysql> select sub_replace("this is origin str","NEW-STR",1); -+-------------------------------------------------+ -| sub_replace('this is origin str', 'NEW-STR', 1) | -+-------------------------------------------------+ -| tNEW-STRorigin str | -+-------------------------------------------------+ - -mysql> select sub_replace("doris","***",1,2); -+-----------------------------------+ -| sub_replace('doris', '***', 1, 2) | -+-----------------------------------+ -| d***is | -+-----------------------------------+ -``` -### keywords - SUB_REPLACE diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/substring-index.md b/docs/en/docs/sql-manual/sql-functions/string-functions/substring-index.md deleted file mode 100644 index dea4986c70046f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/substring-index.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -{ -"title": "SUBSTRING_INDEX", -"language": "en" -} ---- - - - -## substring_index - -### Name - - - -SUBSTRING_INDEX - - - -### description - -#### Syntax - -`VARCHAR substring_index(VARCHAR content, VARCHAR delimiter, INT field)` - -Split `content` to two parts at position where the `field`s of `delimiter` stays, return one of them according to below rules: -if `field` is positive, return the left part; -else if `field` is negative, return the right part; -if `field` is zero, return an empty string when `content` is not null, else will return null. - -- `delimiter` is case sensitive and multi-byte safe. -- `delimiter` and `field` parameter should be constant. 
- - -### example - -``` -mysql> select substring_index("hello world", " ", 1); -+----------------------------------------+ -| substring_index("hello world", " ", 1) | -+----------------------------------------+ -| hello | -+----------------------------------------+ -mysql> select substring_index("hello world", " ", 2); -+----------------------------------------+ -| substring_index("hello world", " ", 2) | -+----------------------------------------+ -| hello world | -+----------------------------------------+ -mysql> select substring_index("hello world", " ", -1); -+-----------------------------------------+ -| substring_index("hello world", " ", -1) | -+-----------------------------------------+ -| world | -+-----------------------------------------+ -mysql> select substring_index("hello world", " ", -2); -+-----------------------------------------+ -| substring_index("hello world", " ", -2) | -+-----------------------------------------+ -| hello world | -+-----------------------------------------+ -mysql> select substring_index("hello world", " ", -3); -+-----------------------------------------+ -| substring_index("hello world", " ", -3) | -+-----------------------------------------+ -| hello world | -+-----------------------------------------+ -mysql> select substring_index("hello world", " ", 0); -+----------------------------------------+ -| substring_index("hello world", " ", 0) | -+----------------------------------------+ -| | -+----------------------------------------+ -``` -### keywords - - SUBSTRING_INDEX, SUBSTRING \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/substring.md b/docs/en/docs/sql-manual/sql-functions/string-functions/substring.md deleted file mode 100644 index b3a2cd3bc53dd1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/substring.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -{ - "title": "SUBSTRING", - "language": "en" -} ---- - - - -## substring -### description 
-#### Syntax - -`VARCHAR substring(VARCHAR str, INT pos[, INT len])` - -The forms without a `len` argument return a substring from string `str` starting at position `pos`. -The forms with a `len` argument return a substring len characters long from string `str`, starting at position pos. -It is also possible to use a negative value for `pos`. In this case, -the beginning of the substring is `pos` characters from the end of the string, rather than the beginning. -A negative value may be used for `pos` in any of the forms of this function. -A value of 0 for `pos` returns an empty string. - -For all forms of SUBSTRING(), -the position of the first character in the string from which the substring is to be extracted is reckoned as 1. - -If len is less than 1, the result is the empty string. - -The function have a alias named `substr`. - -### example - -``` -mysql> select substring('abc1', 2); -+-----------------------------+ -| substring('abc1', 2) | -+-----------------------------+ -| bc1 | -+-----------------------------+ - -mysql> select substring('abc1', -2); -+-----------------------------+ -| substring('abc1', -2) | -+-----------------------------+ -| c1 | -+-----------------------------+ - -mysql> select substring('abc1', 0); -+----------------------+ -| substring('abc1', 0) | -+----------------------+ -| | -+----------------------+ - -mysql> select substring('abc1', 5); -+-----------------------------+ -| substring('abc1', 5) | -+-----------------------------+ -| | -+-----------------------------+ - -mysql> select substring('abc1def', 2, 2); -+-----------------------------+ -| substring('abc1def', 2, 2) | -+-----------------------------+ -| bc | -+-----------------------------+ -``` - -### keywords - SUBSTRING, STRING, SUBSTR \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/to-base64.md b/docs/en/docs/sql-manual/sql-functions/string-functions/to-base64.md deleted file mode 100644 index 345a11955fe889..00000000000000 
--- a/docs/en/docs/sql-manual/sql-functions/string-functions/to-base64.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "TO_BASE64", - "language": "en" -} ---- - - - -## to_base64 -### description -#### Syntax - -`VARCHAR to_base64(VARCHAR str)` - - -Returns the result of Base64 encoding the input string - -### example - -``` -mysql> select to_base64('1'); -+----------------+ -| to_base64('1') | -+----------------+ -| MQ== | -+----------------+ - -mysql> select to_base64('234'); -+------------------+ -| to_base64('234') | -+------------------+ -| MjM0 | -+------------------+ -``` -### keywords - to_base64 diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/trim.md b/docs/en/docs/sql-manual/sql-functions/string-functions/trim.md deleted file mode 100644 index 8503db9ce3c9d4..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/trim.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "TRIM", - "language": "en" -} ---- - - - -## trim -### description -#### Syntax - -`VARCHAR trim(VARCHAR str[, VARCHAR rhs])` - - -When the 'rhs' parameter is not present, remove the continuous spaces that appear from the starting and ending of the 'str' parameter. Otherwise, remove 'rhs'. - -### example - -``` -mysql> SELECT trim(' ab d ') str; -+------+ -| str | -+------+ -| ab d | -+------+ - -mysql> SELECT trim('ababccaab','ab') str; -+------+ -| str | -+------+ -| cca | -+------+ -``` -### keywords - TRIM diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/ucase.md b/docs/en/docs/sql-manual/sql-functions/string-functions/ucase.md deleted file mode 100644 index cf1160806f0b58..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/ucase.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -{ - "title": "UCASE", - "language": "en" -} ---- - - - -## ucase -### description -#### Syntax - -`VARCHAR ucase(VARCHAR str)` - - -Convert all strings in parameters to uppercase. 
Another alias for this function is [upper](./upper.md). - -### keywords - UCASE diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/unhex.md b/docs/en/docs/sql-manual/sql-functions/string-functions/unhex.md deleted file mode 100644 index ffbc4db2c1426f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/unhex.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -{ - "title": "UNHEX", - "language": "en" -} ---- - - - -## unhex -### description -#### Syntax - -`VARCHAR unhex(VARCHAR str)` - -Enter a string, if the length of the string is 0 or an odd number, an empty string is returned; -If the string contains characters other than `[0-9], [a-f], [A-F]`, an empty string is returned; -In other cases, every two characters are a group of characters converted into hexadecimal, and then spliced into a string for output. - - -### example - -``` -mysql> select unhex('@'); -+------------+ -| unhex('@') | -+------------+ -| | -+------------+ - -mysql> select unhex('41'); -+-------------+ -| unhex('41') | -+-------------+ -| A | -+-------------+ - -mysql> select unhex('4142'); -+---------------+ -| unhex('4142') | -+---------------+ -| AB | -+---------------+ -``` -### keywords - UNHEX diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/upper.md b/docs/en/docs/sql-manual/sql-functions/string-functions/upper.md deleted file mode 100644 index ad23316a53c049..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/upper.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -{ - "title": "UPPER", - "language": "en" -} ---- - - - -## upper -### description -#### Syntax - -`VARCHAR upper(VARCHAR str)` - - -Convert all strings in parameters to uppercase. Another alias for this function is [ucase](./ucase.md). 
- -### example - -``` -mysql> SELECT upper("aBc123"); -+-----------------+ -| upper('aBc123') | -+-----------------+ -| ABC123 | -+-----------------+ -``` -### keywords - UPPER diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/url-decode.md b/docs/en/docs/sql-manual/sql-functions/string-functions/url-decode.md deleted file mode 100644 index b09f0f4635d727..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/url-decode.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "url_decode", - "language": "en" -} ---- - - - -## url_decode -### description - -Converts an url to a decode string. - -#### Syntax - -```sql -VARCHAR url_decode(VARCHAR url) -``` - -### Parameters - -- url: the string to decode. If url is not a string type. - -### example - -``` -mysql> mysql> select url_decode('https%3A%2F%2Fdoris.apache.org%2Fzh-CN%2Fdocs%2Fsql-manual%2Fsql-functions%2Fstring-functions'); -+------------------------------------------------+ -| url_decode('https%3A%2F%2Fdoris.apache.org%2Fzh-CN%2Fdocs%2Fsql-manual%2Fsql-functions%2Fstring-functions') | -+------------------------------------------------+ -| https://doris.apache.org/zh-CN/docs/sql-manual/sql-functions/string-functions | -+------------------------------------------------+ -``` - -### keywords - URL DECODE diff --git a/docs/en/docs/sql-manual/sql-functions/string-functions/uuid.md b/docs/en/docs/sql-manual/sql-functions/string-functions/uuid.md deleted file mode 100644 index 6348cc4a4ad43d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/string-functions/uuid.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "UUID", - "language": "en" -} ---- - - - -## uuid - - - -uuid - - - -### description -#### Syntax - -`VARCHAR uuid()` - -return a random uuid string - - -### example - -``` -mysql> select uuid(); -+--------------------------------------+ -| uuid() | -+--------------------------------------+ -| 29077778-fc5e-4603-8368-6b5f8fd55c24 | 
-+--------------------------------------+ - -``` - -### keywords - UUID diff --git a/docs/en/docs/sql-manual/sql-functions/struct-functions/named-struct.md b/docs/en/docs/sql-manual/sql-functions/struct-functions/named-struct.md deleted file mode 100644 index bda2a23857b8b1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/struct-functions/named-struct.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -{ - "title": "NAMED_STRUCT", - "language": "en" -} ---- - - - -## named_struct - - - -named_struct - - - -### description - -#### Syntax - -`STRUCT named_struct({VARCHAR, T1}, {VARCHAR, T2}, ...)` - -Construct a struct with the given field names and values. - -The number of parameters must be non zero and even. With odd digits being the name of the field and could be string literal, with even digits being the value of the field and could be column or literal. - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select named_struct('f1', 1, 'f2', 'a', 'f3', "abc"); -+-----------------------------------------------+ -| named_struct('f1', 1, 'f2', 'a', 'f3', 'abc') | -+-----------------------------------------------+ -| {1, 'a', 'abc'} | -+-----------------------------------------------+ -1 row in set (0.01 sec) - -mysql> select named_struct('a', null, 'b', "v"); -+-----------------------------------+ -| named_struct('a', NULL, 'b', 'v') | -+-----------------------------------+ -| {NULL, 'v'} | -+-----------------------------------+ -1 row in set (0.01 sec) - -mysql> select named_struct('f1', k1, 'f2', k2, 'f3', null) from test_tb; -+--------------------------------------------------+ -| named_struct('f1', `k1`, 'f2', `k2`, 'f3', NULL) | -+--------------------------------------------------+ -| {1, 'a', NULL} | -+--------------------------------------------------+ -1 row in set (0.02 sec) -``` - -### keywords - -NAMED, STRUCT, NAMED_STRUCT diff --git a/docs/en/docs/sql-manual/sql-functions/struct-functions/struct-element.md 
b/docs/en/docs/sql-manual/sql-functions/struct-functions/struct-element.md deleted file mode 100644 index cab57741a91ad0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/struct-functions/struct-element.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -{ - "title": "STRUCT_ELEMENT", - "language": "en" -} ---- - - - -## struct_element - - - -struct_element - - - -### description - -Function allows getting a field from a struct. - -#### Syntax - -``` -struct_element(struct, n/s) -``` - -#### Arguments - -``` -struct - The input struct column. If null, null will be returned. -n - The position of field,starting from 1,only supports constants. -s - The name of field,only supports constants. -``` - -#### Returned value - -Returns the specified field column, of any type. - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select struct_element(named_struct('f1', 1, 'f2', 'a'), 'f2'); -+--------------------------------------------------------+ -| struct_element(named_struct('f1', 1, 'f2', 'a'), 'f2') | -+--------------------------------------------------------+ -| a | -+--------------------------------------------------------+ -1 row in set (0.03 sec) - -mysql> select struct_element(named_struct('f1', 1, 'f2', 'a'), 1); -+-----------------------------------------------------+ -| struct_element(named_struct('f1', 1, 'f2', 'a'), 1) | -+-----------------------------------------------------+ -| 1 | -+-----------------------------------------------------+ -1 row in set (0.02 sec) - -mysql> select struct_col, struct_element(struct_col, 'f1') from test_struct; -+-------------------------------------------------+-------------------------------------+ -| struct_col | struct_element(`struct_col `, 'f1') | -+-------------------------------------------------+-------------------------------------+ -| {1, 2, 3, 4, 5} | 1 | -| {1, 1000, 10000000, 100000000000, 100000000000} | 1 | -| {5, 4, 3, 2, 1} | 5 | -| NULL | NULL | -| {1, NULL, 3, NULL, 5} | 1 | 
-+-------------------------------------------------+-------------------------------------+ -9 rows in set (0.01 sec) -``` - -### keywords - -STRUCT, ELEMENT, STRUCT_ELEMENT \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/struct-functions/struct.md b/docs/en/docs/sql-manual/sql-functions/struct-functions/struct.md deleted file mode 100644 index c7157bea630cce..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/struct-functions/struct.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -{ - "title": "STRUCT", - "language": "en" -} ---- - - - -## struct() - - - -struct() - - - -### description - -#### Syntax - -`STRUCT struct(T1, T2, T3, ...)` - -construct an struct with variadic elements and return it, Tn could be column or literal - -### notice - -`Only supported in vectorized engine` - -### example - -``` -mysql> select struct(1, 'a', "abc"); -+-----------------------+ -| struct(1, 'a', 'abc') | -+-----------------------+ -| {1, 'a', 'abc'} | -+-----------------------+ -1 row in set (0.03 sec) - -mysql> select struct(null, 1, null); -+-----------------------+ -| struct(NULL, 1, NULL) | -+-----------------------+ -| {NULL, 1, NULL} | -+-----------------------+ -1 row in set (0.02 sec) - -mysql> select struct(cast('2023-03-16' as datetime)); -+----------------------------------------+ -| struct(CAST('2023-03-16' AS DATETIME)) | -+----------------------------------------+ -| {2023-03-16 00:00:00} | -+----------------------------------------+ -1 row in set (0.01 sec) - -mysql> select struct(k1, k2, null) from test_tb; -+--------------------------+ -| struct(`k1`, `k2`, NULL) | -+--------------------------+ -| {1, 'a', NULL} | -+--------------------------+ -1 row in set (0.04 sec) -``` - -### keywords - -STRUCT,CONSTRUCTOR \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/active_queries.md b/docs/en/docs/sql-manual/sql-functions/table-functions/active_queries.md deleted file mode 100644 index 
cbc0e20845d00a..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/active_queries.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -{ - "title": "ACTIVE_QUERIES", - "language": "en" -} ---- - - - -## `active_queries` - -### Name - - - -active_queries - - - -### description - -Table-Value-Function, generate a temporary table named active_queries. This tvf is used to view the information of running queries in doris cluster. - -This function is used in FROM clauses. - -#### syntax -`active_queries()` - -active_queries() table schema: -``` -mysql [(none)]>desc function active_queries(); -+------------------+--------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+------------------+--------+------+-------+---------+-------+ -| QueryId | TEXT | No | false | NULL | NONE | -| StartTime | TEXT | No | false | NULL | NONE | -| QueryTimeMs | BIGINT | No | false | NULL | NONE | -| WorkloadGroupId | BIGINT | No | false | NULL | NONE | -| Database | TEXT | No | false | NULL | NONE | -| FrontendInstance | TEXT | No | false | NULL | NONE | -| Sql | TEXT | No | false | NULL | NONE | -+------------------+--------+------+-------+---------+-------+ -7 rows in set (0.00 sec) -``` - -### example -``` -mysql [(none)]>select * from active_queries(); -+-----------------------------------+---------------------+-------------+-----------------+----------+------------------+--------------------------------+ -| QueryId | StartTime | QueryTimeMs | WorkloadGroupId | Database | FrontendInstance | Sql | -+-----------------------------------+---------------------+-------------+-----------------+----------+------------------+--------------------------------+ -| a84bf9f3ea6348e1-ac542839f8f2af5d | 2024-03-04 17:33:09 | 9 | 10002 | | localhost | select * from active_queries() | -+-----------------------------------+---------------------+-------------+-----------------+----------+------------------+--------------------------------+ -1 row in set (0.03 
sec) -``` - -### keywords - - active_queries diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/backends.md b/docs/en/docs/sql-manual/sql-functions/table-functions/backends.md deleted file mode 100644 index f5639c42d725d3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/backends.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -{ - "title": "BACKENDS", - "language": "en" -} ---- - - - -## `backends` - -### Name - - - -backends - - - -### description - -Table-Value-Function, generate a temporary table named `backends`. This tvf is used to view the information of BE nodes in the doris cluster. - -This function is used in `FROM` clauses. - -#### syntax - -`backends()` - -The table schema of `backends()` tvf: -``` -mysql> desc function backends(); -+-------------------------+---------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------------------------+---------+------+-------+---------+-------+ -| BackendId | BIGINT | No | false | NULL | NONE | -| Host | TEXT | No | false | NULL | NONE | -| HeartbeatPort | INT | No | false | NULL | NONE | -| BePort | INT | No | false | NULL | NONE | -| HttpPort | INT | No | false | NULL | NONE | -| BrpcPort | INT | No | false | NULL | NONE | -| LastStartTime | TEXT | No | false | NULL | NONE | -| LastHeartbeat | TEXT | No | false | NULL | NONE | -| Alive | BOOLEAN | No | false | NULL | NONE | -| SystemDecommissioned | BOOLEAN | No | false | NULL | NONE | -| TabletNum | BIGINT | No | false | NULL | NONE | -| DataUsedCapacity | BIGINT | No | false | NULL | NONE | -| AvailCapacity | BIGINT | No | false | NULL | NONE | -| TotalCapacity | BIGINT | No | false | NULL | NONE | -| UsedPct | DOUBLE | No | false | NULL | NONE | -| MaxDiskUsedPct | DOUBLE | No | false | NULL | NONE | -| RemoteUsedCapacity | BIGINT | No | false | NULL | NONE | -| Tag | TEXT | No | false | NULL | NONE | -| ErrMsg | TEXT | No | false | NULL | NONE | -| Version | TEXT | No | false | NULL | 
NONE | -| Status | TEXT | No | false | NULL | NONE | -| HeartbeatFailureCounter | INT | No | false | NULL | NONE | -| NodeRole | TEXT | No | false | NULL | NONE | -+-------------------------+---------+------+-------+---------+-------+ -23 rows in set (0.002 sec) -``` - -The information displayed by the `backends` tvf is basically consistent with the information displayed by the `show backends` statement. However, the types of each field in the `backends` tvf are more specific, and you can use the `backends` tvf to perform operations such as filtering and joining. - -The information displayed by the `backends` tvf is authenticated, which is consistent with the behavior of `show backends`, user must have ADMIN/OPERATOR privelege. - -### example -``` -mysql> select * from backends()\G -*************************** 1. row *************************** - BackendId: 10002 - Host: 10.xx.xx.90 - HeartbeatPort: 9053 - BePort: 9063 - HttpPort: 8043 - BrpcPort: 8069 - LastStartTime: 2023-06-15 16:51:02 - LastHeartbeat: 2023-06-15 17:09:58 - Alive: 1 - SystemDecommissioned: 0 - TabletNum: 21 - DataUsedCapacity: 0 - AvailCapacity: 5187141550081 - TotalCapacity: 7750977622016 - UsedPct: 33.077583202570978 - MaxDiskUsedPct: 33.077583202583881 - RemoteUsedCapacity: 0 - Tag: {"location" : "default"} - ErrMsg: - Version: doris-0.0.0-trunk-4b18cde0c7 - Status: {"lastSuccessReportTabletsTime":"2023-06-15 17:09:02","lastStreamLoadTime":-1,"isQueryDisabled":false,"isLoadDisabled":false} -HeartbeatFailureCounter: 0 - NodeRole: mix -1 row in set (0.038 sec) -``` - -### keywords - - backends \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/catalogs.md b/docs/en/docs/sql-manual/sql-functions/table-functions/catalogs.md deleted file mode 100644 index e748297da7ff04..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/catalogs.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -{ - "title": "CATALOGS", - "language": "en" -} ---- - - - -## 
`catalogs` - -### Name - - -catalogs - - -### description - -The table function generates a temporary table of catalogs to view the information of the catalogs created in the current Doris. - -This function is used in the from clause. - -#### syntax - -`catalogs()` - -Catalogs () table structure: -``` -mysql> desc function catalogs(); -+-------------+--------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------------+--------+------+-------+---------+-------+ -| CatalogId | BIGINT | No | false | NULL | NONE | -| CatalogName | TEXT | No | false | NULL | NONE | -| CatalogType | TEXT | No | false | NULL | NONE | -| Property | TEXT | No | false | NULL | NONE | -| Value | TEXT | No | false | NULL | NONE | -+-------------+--------+------+-------+---------+-------+ -5 rows in set (0.04 sec) -``` - -The information presented by `catalogs()` tvf is the result of synthesizing `show catalogs` and `show catalog xxx` statements. - -The table generated by tvf can be used for filtering, join and other operations. 
- - -### example - -``` -mysql> select * from catalogs(); -+-----------+-------------+-------------+--------------------------------------------+---------------------------------------------------------------------------+ -| CatalogId | CatalogName | CatalogType | Property | Value | -+-----------+-------------+-------------+--------------------------------------------+---------------------------------------------------------------------------+ -| 16725 | hive | hms | dfs.client.failover.proxy.provider.HANN | org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider | -| 16725 | hive | hms | dfs.ha.namenodes.HANN | nn1,nn2 | -| 16725 | hive | hms | create_time | 2023-07-13 16:24:38.968 | -| 16725 | hive | hms | ipc.client.fallback-to-simple-auth-allowed | true | -| 16725 | hive | hms | dfs.namenode.rpc-address.HANN.nn1 | nn1_host:rpc_port | -| 16725 | hive | hms | hive.metastore.uris | thrift://127.0.0.1:7004 | -| 16725 | hive | hms | dfs.namenode.rpc-address.HANN.nn2 | nn2_host:rpc_port | -| 16725 | hive | hms | type | hms | -| 16725 | hive | hms | dfs.nameservices | HANN | -| 0 | internal | internal | NULL | NULL | -| 16726 | es | es | create_time | 2023-07-13 16:24:44.922 | -| 16726 | es | es | type | es | -| 16726 | es | es | hosts | http://127.0.0.1:9200 | -+-----------+-------------+-------------+--------------------------------------------+---------------------------------------------------------------------------+ -13 rows in set (0.01 sec) -``` - -### keywords - - catalogs diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/explode-bitmap.md b/docs/en/docs/sql-manual/sql-functions/table-functions/explode-bitmap.md deleted file mode 100644 index be661647d5ce95..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/explode-bitmap.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -{ - "title": "EXPLODE_BITMAP", - "language": "en" -} ---- - - - -## explode_bitmap - -### description - -Table functions must be used in 
conjunction with Lateral View. - -Expand a bitmap type. - -#### syntax - -`explode_bitmap(bitmap)` - -### example - -Original table data: - -``` -mysql> select k1 from example1 order by k1; -+------+ -| k1 | -+------+ -| 1 | -| 2 | -| 3 | -| 4 | -| 5 | -| 6 | -+------+ -``` - -Lateral View: - -``` -mysql> select k1, e1 from example1 lateral view explode_bitmap(bitmap_empty()) tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 2 | NULL | -| 3 | NULL | -| 4 | NULL | -| 5 | NULL | -| 6 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_bitmap(bitmap_from_string("1")) tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | 1 | -| 2 | 1 | -| 3 | 1 | -| 4 | 1 | -| 5 | 1 | -| 6 | 1 | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_bitmap(bitmap_from_string("1,2")) tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | 1 | -| 1 | 2 | -| 2 | 1 | -| 2 | 2 | -| 3 | 1 | -| 3 | 2 | -| 4 | 1 | -| 4 | 2 | -| 5 | 1 | -| 5 | 2 | -| 6 | 1 | -| 6 | 2 | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_bitmap(bitmap_from_string("1,1000")) tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | 1 | -| 1 | 1000 | -| 2 | 1 | -| 2 | 1000 | -| 3 | 1 | -| 3 | 1000 | -| 4 | 1 | -| 4 | 1000 | -| 5 | 1 | -| 5 | 1000 | -| 6 | 1 | -| 6 | 1000 | -+------+------+ - -mysql> select k1, e1, e2 from example1 -lateral view explode_bitmap(bitmap_from_string("1,1000")) tmp1 as e1 -lateral view explode_split("a,b", ",") tmp2 as e2 order by k1, e1, e2; -+------+------+------+ -| k1 | e1 | e2 | -+------+------+------+ -| 1 | 1 | a | -| 1 | 1 | b | -| 1 | 1000 | a | -| 1 | 1000 | b | -| 2 | 1 | a | -| 2 | 1 | b | -| 2 | 1000 | a | -| 2 | 1000 | b | -| 3 | 1 | a | -| 3 | 1 | b | -| 3 | 1000 | a | -| 3 | 1000 | b | -| 4 | 1 | a | -| 4 | 1 | b | -| 4 | 1000 | a | -| 4 | 1000 | b | -| 5 | 1 | a | -| 5 | 1 | 
b | -| 5 | 1000 | a | -| 5 | 1000 | b | -| 6 | 1 | a | -| 6 | 1 | b | -| 6 | 1000 | a | -| 6 | 1000 | b | -+------+------+------+ -``` - -### keywords - -explode,bitmap,explode_bitmap \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/explode-json-array.md b/docs/en/docs/sql-manual/sql-functions/table-functions/explode-json-array.md deleted file mode 100644 index 0444ec33a405e3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/explode-json-array.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -{ - "title": "EXPLODE_JSON_ARRAY", - "language": "en" -} ---- - - - -## explode_json_array - -### description - -Table functions must be used in conjunction with Lateral View. - -Expand a json array. According to the array element type, there are three function names. Corresponding to integer, floating point and string arrays respectively. - -#### syntax - -```sql -explode_json_array_int(json_str) -explode_json_array_double(json_str) -explode_json_array_string(json_str) -explode_json_array_json(json_str) -``` - -### example - -Original table data: - -``` -mysql> select k1 from example1 order by k1; -+------+ -| k1 | -+------+ -| 1 | -| 2 | -| 3 | -+------+ -``` - -Lateral View: - -``` -mysql> select k1, e1 from example1 lateral view explode_json_array_int('[]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 2 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_int('[1,2,3]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | 1 | -| 1 | 2 | -| 1 | 3 | -| 2 | 1 | -| 2 | 2 | -| 2 | 3 | -| 3 | 1 | -| 3 | 2 | -| 3 | 3 | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_int('[1,"b",3]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 1 | 1 | -| 1 | 3 | -| 2 | NULL | -| 2 | 1 | -| 2 | 3 | -| 3 | NULL | -| 3 | 1 
| -| 3 | 3 | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_int('["a","b","c"]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 1 | NULL | -| 1 | NULL | -| 2 | NULL | -| 2 | NULL | -| 2 | NULL | -| 3 | NULL | -| 3 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_int('{"a": 3}') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 2 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_double('[]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 2 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_double('[1,2,3]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 1 | NULL | -| 1 | NULL | -| 2 | NULL | -| 2 | NULL | -| 2 | NULL | -| 3 | NULL | -| 3 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_double('[1,"b",3]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 1 | NULL | -| 1 | NULL | -| 2 | NULL | -| 2 | NULL | -| 2 | NULL | -| 3 | NULL | -| 3 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_double('[1.0,2.0,3.0]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | 1 | -| 1 | 2 | -| 1 | 3 | -| 2 | 1 | -| 2 | 2 | -| 2 | 3 | -| 3 | 1 | -| 3 | 2 | -| 3 | 3 | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_double('[1,"b",3]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 1 | NULL | -| 1 | NULL | -| 2 | NULL | -| 2 | NULL | -| 2 | NULL | -| 3 | NULL | -| 3 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select 
k1, e1 from example1 lateral view explode_json_array_double('["a","b","c"]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 1 | NULL | -| 1 | NULL | -| 2 | NULL | -| 2 | NULL | -| 2 | NULL | -| 3 | NULL | -| 3 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_double('{"a": 3}') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 2 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_string('[]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 2 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_string('[1.0,2.0,3.0]') tmp1 as e1 order by k1, e1; -+------+----------+ -| k1 | e1 | -+------+----------+ -| 1 | 1.000000 | -| 1 | 2.000000 | -| 1 | 3.000000 | -| 2 | 1.000000 | -| 2 | 2.000000 | -| 2 | 3.000000 | -| 3 | 1.000000 | -| 3 | 2.000000 | -| 3 | 3.000000 | -+------+----------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_string('[1,"b",3]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | 1 | -| 1 | 3 | -| 1 | b | -| 2 | 1 | -| 2 | 3 | -| 2 | b | -| 3 | 1 | -| 3 | 3 | -| 3 | b | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_string('["a","b","c"]') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | a | -| 1 | b | -| 1 | c | -| 2 | a | -| 2 | b | -| 2 | c | -| 3 | a | -| 3 | b | -| 3 | c | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_json_array_string('{"a": 3}') tmp1 as e1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | NULL | -| 2 | NULL | -| 3 | NULL | -+------+------+ - -mysql> select k1, e1 from example1 lateral view 
explode_json_array_json('[{"id":1,"name":"John"},{"id":2,"name":"Mary"},{"id":3,"name":"Bob"}]') tmp1 as e1 order by k1, e1; -+------+------------------------+ -| k1 | e1 | -+------+------------------------+ -| 1 | {"id":1,"name":"John"} | -| 1 | {"id":2,"name":"Mary"} | -| 1 | {"id":3,"name":"Bob"} | -| 2 | {"id":1,"name":"John"} | -| 2 | {"id":2,"name":"Mary"} | -| 2 | {"id":3,"name":"Bob"} | -| 3 | {"id":1,"name":"John"} | -| 3 | {"id":2,"name":"Mary"} | -| 3 | {"id":3,"name":"Bob"} | -+------+------------------------+ -``` - -### keywords - -explode,json,array,json_array,explode_json,explode_json_array diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/explode-numbers-outer.md b/docs/en/docs/sql-manual/sql-functions/table-functions/explode-numbers-outer.md deleted file mode 100644 index c0432e300ef01b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/explode-numbers-outer.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "EXPLODE_NUMBERS_OUTER", - "language": "en" -} ---- - - - -## outer combinator - -### description - -#### syntax -`explode_numbers(INT x)` - -Adding the `_outer` suffix after the function name of the table function changes the function behavior from `non-outer` to `outer`, and adds a row of `Null` data when the table function generates 0 rows of data. 
- -### example - -``` -mysql> select e1 from (select 1 k1) as t lateral view explode_numbers(0) tmp1 as e1; -Empty set - -mysql> select e1 from (select 1 k1) as t lateral view explode_numbers_outer(0) tmp1 as e1; -+------+ -| e1 | -+------+ -| NULL | -+------+ -``` -### keywords - - outer \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/explode-numbers.md b/docs/en/docs/sql-manual/sql-functions/table-functions/explode-numbers.md deleted file mode 100644 index 159043713c527f..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/explode-numbers.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "EXPLODE_NUMBERS", - "language": "en" -} ---- - - - -## explode_numbers - -### description - -Table functions must be used in conjunction with Lateral View. - -Get a number sequence [0,n). - -#### syntax - -`explode_numbers(n)` - -### example -``` -mysql> select e1 from (select 1 k1) as t lateral view explode_numbers(5) tmp1 as e1; -+------+ -| e1 | -+------+ -| 0 | -| 1 | -| 2 | -| 3 | -| 4 | -+------+ -``` -### keywords - -explode,numbers,explode_numbers \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/explode-split.md b/docs/en/docs/sql-manual/sql-functions/table-functions/explode-split.md deleted file mode 100644 index 1ccf91c40aa568..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/explode-split.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -{ - "title": "EXPLODE_SPLIT", - "language": "en" -} ---- - - - -## explode_split - -### description -#### syntax - -`explode_split(str, delimiter)` - -Table functions must be used in conjunction with Lateral View. - -Split a string into multiple substrings according to the specified delimiter. 
- -grammar: - -``` -explode_split(str, delimiter) -``` - -### example - -Original table data: - -``` -mysql> select * from example1 order by k1; -+------+---------+ -| k1 | k2 | -+------+---------+ -| 1 | | -| 2 | NULL | -| 3 | , | -| 4 | 1 | -| 5 | 1,2,3 | -| 6 | a, b, c | -+------+---------+ -``` - -Lateral View: - -``` -mysql> select k1, e1 from example1 lateral view explode_split(k2, ',') tmp1 as e1 where k1 = 1 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 1 | | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_split(k2, ',') tmp1 as e1 where k1 = 2 order by k1, e1; -Empty set - -mysql> select k1, e1 from example1 lateral view explode_split(k2, ',') tmp1 as e1 where k1 = 3 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 3 | | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_split(k2, ',') tmp1 as e1 where k1 = 4 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 4 | 1 | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_split(k2, ',') tmp1 as e1 where k1 = 5 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 5 | 2 | -| 5 | 3 | -| 5 | 1 | -+------+------+ - -mysql> select k1, e1 from example1 lateral view explode_split(k2, ',') tmp1 as e1 where k1 = 6 order by k1, e1; -+------+------+ -| k1 | e1 | -+------+------+ -| 6 | b | -| 6 | c | -| 6 | a | -+------+------+ -``` - -### keywords - -explode,split,explode_split \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/explode.md b/docs/en/docs/sql-manual/sql-functions/table-functions/explode.md deleted file mode 100644 index d6349e76058648..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/explode.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -{ - "title": "EXPLODE", - "language": "en" -} ---- - - - -## explode - -### description - -Table functions must be used in conjunction with Lateral View. 
- -explode array column to rows. `explode_outer` will return NULL, while `array` is NULL or empty. -`explode` and `explode_outer` both keep the nested NULL elements of array. - -#### syntax -```sql -explode(expr) -explode_outer(expr) -``` - -### example -``` -mysql> set enable_vectorized_engine = true - -mysql> select e1 from (select 1 k1) as t lateral view explode([1,2,3]) tmp1 as e1; -+------+ -| e1 | -+------+ -| 1 | -| 2 | -| 3 | -+------+ - -mysql> select e1 from (select 1 k1) as t lateral view explode_outer(null) tmp1 as e1; -+------+ -| e1 | -+------+ -| NULL | -+------+ - -mysql> select e1 from (select 1 k1) as t lateral view explode([]) tmp1 as e1; -Empty set (0.010 sec) - -mysql> select e1 from (select 1 k1) as t lateral view explode([null,1,null]) tmp1 as e1; -+------+ -| e1 | -+------+ -| NULL | -| 1 | -| NULL | -+------+ - -mysql> select e1 from (select 1 k1) as t lateral view explode_outer([null,1,null]) tmp1 as e1; -+------+ -| e1 | -+------+ -| NULL | -| 1 | -| NULL | -+------+ -``` - -### keywords -EXPLODE,EXPLODE_OUTER,ARRAY \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/explode_map.md b/docs/en/docs/sql-manual/sql-functions/table-functions/explode_map.md deleted file mode 100644 index 7c5cae597cb3ee..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/explode_map.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -{ - "title": "EXPLODE_MAP", - "language": "en" -} ---- - - - -## explode - -### description - -Table functions must be used in conjunction with Lateral View, support multi conjunction with Lateral View,support new optimizer only. - -explode map column to rows. `explode_map_outer` will return NULL, while `map` is NULL or empty. -`explode_map` and `explode_map_outer` both keep the nested NULL elements of map. 
- -#### syntax -```sql -explode_map(expr) -explode_map_outer(expr) -``` - -### example -``` -mysql> set enable_vectorized_engine = true -mysql> SET enable_nereids_planner=true -mysql> SET enable_fallback_to_original_planner=false - -mysql> CREATE TABLE IF NOT EXISTS `sdu`( - `id` INT NULL, - `name` TEXT NULL, - `score` MAP NULL - ) ENGINE=OLAP - DUPLICATE KEY(`id`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`id`) BUCKETS 1 - PROPERTIES ("replication_allocation" = "tag.location.default: 1"); -Query OK, 0 rows affected (0.15 sec) - -mysql> insert into sdu values (0, "zhangsan", {"Chinese":"80","Math":"60","English":"90"}), (1, "lisi", {"null":null}), (2, "wangwu", {"Chinese":"88","Math":"90","English":"96"}), (3, "lisi2", {null:null}), (4, "amory", NULL); -Query OK, 5 rows affected (0.23 sec) -{'label':'label_9b35d9d9d59147f5_bffb974881ed2133', 'status':'VISIBLE', 'txnId':'4005'} - -mysql> select * from sdu order by id; -+------+----------+-----------------------------------------+ -| id | name | score | -+------+----------+-----------------------------------------+ -| 0 | zhangsan | {"Chinese":80, "Math":60, "English":90} | -| 1 | lisi | {"null":null} | -| 2 | wangwu | {"Chinese":88, "Math":90, "English":96} | -| 3 | lisi2 | {null:null} | -| 4 | amory | NULL | -+------+----------+-----------------------------------------+ - -mysql> select name, k,v from sdu lateral view explode_map(score) tmp as k,v; -+----------+---------+------+ -| name | k | v | -+----------+---------+------+ -| zhangsan | Chinese | 80 | -| zhangsan | Math | 60 | -| zhangsan | English | 90 | -| lisi | null | NULL | -| wangwu | Chinese | 88 | -| wangwu | Math | 90 | -| wangwu | English | 96 | -| lisi2 | NULL | NULL | -+----------+---------+------+ - -mysql> select name, k,v from sdu lateral view explode_map_outer(score) tmp as k,v; -+----------+---------+------+ -| name | k | v | -+----------+---------+------+ -| zhangsan | Chinese | 80 | -| zhangsan | Math | 60 | -| zhangsan | English | 90 | -| lisi 
| null | NULL | -| wangwu | Chinese | 88 | -| wangwu | Math | 90 | -| wangwu | English | 96 | -| lisi2 | NULL | NULL | -| amory | NULL | NULL | -+----------+---------+------+ - -mysql> select name, k,v,k1,v1 from sdu lateral view explode_map_outer(score) tmp as k,v lateral view explode_map(score) tmp2 as k1,v1; -+----------+---------+------+---------+------+ -| name | k | v | k1 | v1 | -+----------+---------+------+---------+------+ -| zhangsan | Chinese | 80 | Chinese | 80 | -| zhangsan | Chinese | 80 | Math | 60 | -| zhangsan | Chinese | 80 | English | 90 | -| zhangsan | Math | 60 | Chinese | 80 | -| zhangsan | Math | 60 | Math | 60 | -| zhangsan | Math | 60 | English | 90 | -| zhangsan | English | 90 | Chinese | 80 | -| zhangsan | English | 90 | Math | 60 | -| zhangsan | English | 90 | English | 90 | -| lisi | null | NULL | null | NULL | -| wangwu | Chinese | 88 | Chinese | 88 | -| wangwu | Chinese | 88 | Math | 90 | -| wangwu | Chinese | 88 | English | 96 | -| wangwu | Math | 90 | Chinese | 88 | -| wangwu | Math | 90 | Math | 90 | -| wangwu | Math | 90 | English | 96 | -| wangwu | English | 96 | Chinese | 88 | -| wangwu | English | 96 | Math | 90 | -| wangwu | English | 96 | English | 96 | -| lisi2 | NULL | NULL | NULL | NULL | -+----------+---------+------+---------+------+ -``` - -### keywords -EXPLODE_MAP,EXPLODE_MAP_OUTER,MAP \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/frontends.md b/docs/en/docs/sql-manual/sql-functions/table-functions/frontends.md deleted file mode 100644 index c78f19a64987b8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/frontends.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -{ - "title": "FRONTENDS", - "language": "en" -} ---- - - - -## `frontends` - -### Name - - - -frontends - - - -### description - -Table-Value-Function, generate a temporary table named `frontends`. This tvf is used to view the information of BE nodes in the doris cluster. 
- -This function is used in `FROM` clauses. - -#### syntax - -`frontends()` - -The table schema of `frontends()` tvf: -``` -mysql> desc function frontends(); -+-------------------+------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------------------+------+------+-------+---------+-------+ -| Name | TEXT | No | false | NULL | NONE | -| Host | TEXT | No | false | NULL | NONE | -| EditLogPort | TEXT | No | false | NULL | NONE | -| HttpPort | TEXT | No | false | NULL | NONE | -| QueryPort | TEXT | No | false | NULL | NONE | -| RpcPort | TEXT | No | false | NULL | NONE | -| ArrowFlightSqlPort| TEXT | No | false | NULL | NONE | -| Role | TEXT | No | false | NULL | NONE | -| IsMaster | TEXT | No | false | NULL | NONE | -| ClusterId | TEXT | No | false | NULL | NONE | -| Join | TEXT | No | false | NULL | NONE | -| Alive | TEXT | No | false | NULL | NONE | -| ReplayedJournalId | TEXT | No | false | NULL | NONE | -| LastHeartbeat | TEXT | No | false | NULL | NONE | -| IsHelper | TEXT | No | false | NULL | NONE | -| ErrMsg | TEXT | No | false | NULL | NONE | -| Version | TEXT | No | false | NULL | NONE | -| CurrentConnected | TEXT | No | false | NULL | NONE | -+-------------------+------+------+-------+---------+-------+ -17 rows in set (0.022 sec) -``` - -The information displayed by the `frontends` tvf is basically consistent with the information displayed by the `show frontends` statement. However, the types of each field in the `frontends` tvf are more specific, and you can use the `frontends` tvf to perform operations such as filtering and joining. - -The information displayed by the `frontends` tvf is authenticated, which is consistent with the behavior of `show frontends`, user must have ADMIN/OPERATOR privelege. - -### example -``` -mysql> select * from frontends()\G -*************************** 1. 
row *************************** - Name: fe_5fa8bf19_fd6b_45cb_89c5_25a5ebc45582 - IP: 10.xx.xx.14 - EditLogPort: 9013 - HttpPort: 8034 - QueryPort: 9033 - RpcPort: 9023 -ArrowFlightSqlPort: 9040 - Role: FOLLOWER - IsMaster: true - ClusterId: 1258341841 - Join: true - Alive: true -ReplayedJournalId: 186 - LastHeartbeat: 2023-06-15 16:53:12 - IsHelper: true - ErrMsg: - Version: doris-0.0.0-trunk-4b18cde0c7 - CurrentConnected: Yes -1 row in set (0.060 sec) -``` - -### keywords - - frontends \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/frontends_disks.md b/docs/en/docs/sql-manual/sql-functions/table-functions/frontends_disks.md deleted file mode 100644 index 5bda913d7201f3..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/frontends_disks.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -{ - "title": "frontends_disks", - "language": "en" -} ---- - - - -## `frontends` - -### Name - - - -frontends - - - -### description - -Table-Value-Function, generate a temporary table named `frontends_disks`. This tvf is used to view the information of FE nodes 's disks in the doris cluster. - -This function is used in `FROM` clauses. 
- -#### syntax - -`frontends_disks()` - -The table schema of `frontends_disks()` tvf: -``` -mysql> desc function frontends_disks(); -+-------------+------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------------+------+------+-------+---------+-------+ -| Name | TEXT | No | false | NULL | NONE | -| Host | TEXT | No | false | NULL | NONE | -| DirType | TEXT | No | false | NULL | NONE | -| Dir | TEXT | No | false | NULL | NONE | -| Filesystem | TEXT | No | false | NULL | NONE | -| Capacity | TEXT | No | false | NULL | NONE | -| Used | TEXT | No | false | NULL | NONE | -| Available | TEXT | No | false | NULL | NONE | -| UseRate | TEXT | No | false | NULL | NONE | -| MountOn | TEXT | No | false | NULL | NONE | -+-------------+------+------+-------+---------+-------+ -11 rows in set (0.14 sec) -``` - -The information displayed by the `frontends_disks` tvf is basically consistent with the information displayed by the `show frontends disks` statement. However, the types of each field in the `frontends_disks` tvf are more specific, and you can use the `frontends_disks` tvf to perform operations such as filtering and joining. - -The information displayed by the `frontends_disks` tvf is authenticated, which is consistent with the behavior of `show frontends disks`, user must have ADMIN/OPERATOR privelege. - -### example -``` -mysql> select * from frontends_disk()\G -*************************** 1. row *************************** - Name: fe_fe1d5bd9_d1e5_4ccc_9b03_ca79b95c9941 - Host: 172.XX.XX.1 - DirType: log - Dir: /data/doris/fe-github/log - Filesystem: /dev/sdc5 - Capacity: 366G - Used: 119G - Available: 228G - UseRate: 35% - MountOn: /data -...... 
-12 row in set (0.03 sec) -``` - -### keywords - - frontends_disks \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/hdfs.md b/docs/en/docs/sql-manual/sql-functions/table-functions/hdfs.md deleted file mode 100644 index 6e319421988385..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/hdfs.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -{ - "title": "HDFS", - "language": "en" -} ---- - - - -## HDFS - -### Name - -hdfs - -### Description - -HDFS table-valued-function(tvf), allows users to read and access file contents on S3-compatible object storage, just like accessing relational table. Currently supports `csv/csv_with_names/csv_with_names_and_types/json/parquet/orc` file format. - -#### syntax - -```sql -hdfs( - "uri" = "..", - "fs.defaultFS" = "...", - "hadoop.username" = "...", - "format" = "csv", - "keyn" = "valuen" - ... - ); -``` - -**parameter description** - -Related parameters for accessing hdfs: - -- `uri`: (required) hdfs uri. If the uri path does not exist or the files are empty files, hdfs tvf will return an empty result set. -- `fs.defaultFS`: (required) -- `hadoop.username`: (required) Can be any string, but cannot be empty. -- `hadoop.security.authentication`: (optional) -- `hadoop.username`: (optional) -- `hadoop.kerberos.principal`: (optional) -- `hadoop.kerberos.keytab`: (optional) -- `dfs.client.read.shortcircuit`: (optional) -- `dfs.domain.socket.path`: (optional) - -Related parameters for accessing HDFS in HA mode: -- `dfs.nameservices`: (optional) -- `dfs.ha.namenodes.your-nameservices`: (optional) -- `dfs.namenode.rpc-address.your-nameservices.your-namenode`: (optional) -- `dfs.client.failover.proxy.provider.your-nameservices`: (optional) - -File format parameters: - -- `format`: (required) Currently support `csv/csv_with_names/csv_with_names_and_types/json/parquet/orc/avro` -- `column_separator`: (optional) default `,`. -- `line_delimiter`: (optional) default `\n`. 
-- `compress_type`: (optional) Currently support `UNKNOWN/PLAIN/GZ/LZO/BZ2/LZ4FRAME/DEFLATE`. Default value is `UNKNOWN`, it will automatically infer the type based on the suffix of `uri`. - - The following 6 parameters are used for loading in json format. For specific usage methods, please refer to: [Json Load](../../../data-operate/import/import-way/load-json-format.md) - -- `read_json_by_line`: (optional) default `"true"` -- `strip_outer_array`: (optional) default `"false"` -- `json_root`: (optional) default `""` -- `json_paths`: (optional) default `""` -- `num_as_string`: (optional) default `false` -- `fuzzy_parse`: (optional) default `false` - - The following 2 parameters are used for loading in csv format - -- `trim_double_quotes`: Boolean type (optional), the default value is `false`. True means that the outermost double quotes of each field in the csv file are trimmed. -- `skip_lines`: Integer type (optional), the default value is 0. It will skip some lines in the head of csv file. It will be disabled when the format is `csv_with_names` or `csv_with_names_and_types`. - -other kinds of parameters: - -- `path_partition_keys`: (optional) Specifies the column names carried in the file path. For example, if the file path is /path/to/city=beijing/date="2023-07-09", you should fill in `path_partition_keys="city,date"`. It will automatically read the corresponding column names and values from the path during load process. - -### Examples - -Read and access csv format files on hdfs storage. - -```sql -MySQL [(none)]> select * from hdfs( - "uri" = "hdfs://127.0.0.1:842/user/doris/csv_format_test/student.csv", - "fs.defaultFS" = "hdfs://127.0.0.1:8424", - "hadoop.username" = "doris", - "format" = "csv"); -+------+---------+------+ -| c1 | c2 | c3 | -+------+---------+------+ -| 1 | alice | 18 | -| 2 | bob | 20 | -| 3 | jack | 24 | -| 4 | jackson | 19 | -| 5 | liming | 18 | -+------+---------+------+ -``` - -Read and access csv format files on hdfs storage in HA mode. 
-```sql -MySQL [(none)]> select * from hdfs( - "uri" = "hdfs://127.0.0.1:842/user/doris/csv_format_test/student.csv", - "fs.defaultFS" = "hdfs://127.0.0.1:8424", - "hadoop.username" = "doris", - "format" = "csv", - "dfs.nameservices" = "my_hdfs", - "dfs.ha.namenodes.my_hdfs" = "nn1,nn2", - "dfs.namenode.rpc-address.my_hdfs.nn1" = "nanmenode01:8020", - "dfs.namenode.rpc-address.my_hdfs.nn2" = "nanmenode02:8020", - "dfs.client.failover.proxy.provider.my_hdfs" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"); -+------+---------+------+ -| c1 | c2 | c3 | -+------+---------+------+ -| 1 | alice | 18 | -| 2 | bob | 20 | -| 3 | jack | 24 | -| 4 | jackson | 19 | -| 5 | liming | 18 | -+------+---------+------+ -``` - -Can be used with `desc function` : - -```sql -MySQL [(none)]> desc function hdfs( - "uri" = "hdfs://127.0.0.1:8424/user/doris/csv_format_test/student_with_names.csv", - "fs.defaultFS" = "hdfs://127.0.0.1:8424", - "hadoop.username" = "doris", - "format" = "csv_with_names"); -``` - -### Keywords - - hdfs, table-valued-function, tvf - -### Best Practice - - For more detailed usage of HDFS tvf, please refer to [S3](./s3.md) tvf, The only difference between them is the way of accessing the storage system. diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/iceberg-meta.md b/docs/en/docs/sql-manual/sql-functions/table-functions/iceberg-meta.md deleted file mode 100644 index b9cc9d3a1f1df8..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/iceberg-meta.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -{ -"title": "ICEBERG_META", -"language": "en" -} ---- - - - -## iceberg_meta - -### Name - - - -iceberg_meta - - - -### description - -iceberg_meta table-valued-function(tvf), Use for read iceberg metadata,operation history, snapshots of table, file metadata etc. - -#### syntax - -```sql -iceberg_meta( - "table" = "ctl.db.tbl", - "query_type" = "snapshots" - ... 
- ); -``` - -**parameter description** - -Each parameter in iceberg_meta tvf is a pair of `"key"="value"`. - -Related parameters: -- `table`: (required) Use iceberg table name the format `catlog.database.table`. -- `query_type`: (required) The type of iceberg metadata. Only `snapshots` is currently supported. - -### Example - -Read and access the iceberg tabular metadata for snapshots. - -```sql -select * from iceberg_meta("table" = "ctl.db.tbl", "query_type" = "snapshots"); - -``` - -Can be used with `desc function` : - -```sql -desc function iceberg_meta("table" = "ctl.db.tbl", "query_type" = "snapshots"); -``` - -### Keywords - - iceberg_meta, table-valued-function, tvf - -### Best Prac - -Inspect the iceberg table snapshots : - -```sql -select * from iceberg_meta("table" = "iceberg_ctl.test_db.test_tbl", "query_type" = "snapshots"); -+------------------------+----------------+---------------+-----------+-------------------+------------------------------+ -| committed_at | snapshot_id | parent_id | operation | manifest_list | summary | -+------------------------+----------------+---------------+-----------+-------------------+------------------------------+ -| 2022-09-20 11:14:29 | 64123452344 | -1 | append | hdfs:/path/to/m1 | {"flink.job-id":"xxm1", ...} | -| 2022-09-21 10:36:35 | 98865735822 | 64123452344 | overwrite | hdfs:/path/to/m2 | {"flink.job-id":"xxm2", ...} | -| 2022-09-21 21:44:11 | 51232845315 | 98865735822 | overwrite | hdfs:/path/to/m3 | {"flink.job-id":"xxm3", ...} | -+------------------------+----------------+---------------+-----------+-------------------+------------------------------+ -``` - -Filtered by snapshot_id : - -```sql -select * from iceberg_meta("table" = "iceberg_ctl.test_db.test_tbl", "query_type" = "snapshots") -where snapshot_id = 98865735822; -+------------------------+----------------+---------------+-----------+-------------------+------------------------------+ -| committed_at | snapshot_id | parent_id | operation | 
manifest_list | summary | -+------------------------+----------------+---------------+-----------+-------------------+------------------------------+ -| 2022-09-21 10:36:35 | 98865735822 | 64123452344 | overwrite | hdfs:/path/to/m2 | {"flink.job-id":"xxm2", ...} | -+------------------------+----------------+---------------+-----------+-------------------+------------------------------+ -``` diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/job.md b/docs/en/docs/sql-manual/sql-functions/table-functions/job.md deleted file mode 100644 index 372c63f80996f5..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/job.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -{ - "title": "JOB", - "language": "en" -} ---- - - - -## `job` - -### Name - - - -job - - - -### description - -Table function, generates a temporary table of jobs, which allows you to view the information of jobs in the current Doris cluster. - -This function is used in the FROM clause. - -#### syntax - -**parameter description** - -| parameter | description | type | required | -|:----------|:------------|:-------|:---------| -| type | job type | string | yes | - -the **type** supported types -- insert: insert into type job - - -##### Insert Job - -The table schema of `tasks("type"="insert");` tvf: - -``` -mysql> desc function jobs("type"="insert") -+-------------------+------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------------------+------+------+-------+---------+-------+ -| Id | TEXT | No | false | NULL | NONE | -| Name | TEXT | No | false | NULL | NONE | -| Definer | TEXT | No | false | NULL | NONE | -| ExecuteType | TEXT | No | false | NULL | NONE | -| RecurringStrategy | TEXT | No | false | NULL | NONE | -| Status | TEXT | No | false | NULL | NONE | -| ExecuteSql | TEXT | No | false | NULL | NONE | -| CreateTime | TEXT | No | false | NULL | NONE | -| Comment | TEXT | No | false | NULL | NONE | 
-+-------------------+------+------+-------+---------+-------+ -``` - -### example - -``` -mysql> select * from jobs("type"="insert") where Name='kris'\G -*************************** 1. row *************************** - Id: 10069 - Name: kris - Definer: root - ExecuteType: RECURRING -RecurringStrategy: EVERY 3 SECOND STARTS 2023-12-06 14:44:47 - Status: RUNNING - ExecuteSql: insert into address select * from mysqluser.orders.address where 'create_time' >= days_add(now(),-1) - CreateTime: 2023-12-06 14:44:44 - Comment: load mysql address datas -1 row in set (0.04 sec) -``` - -### keywords - - job, insert, schedule - diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/jobs.md b/docs/en/docs/sql-manual/sql-functions/table-functions/jobs.md deleted file mode 100644 index 602ef985dbabaf..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/jobs.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -{ - "title": "JOBS", - "language": "en" -} ---- - - - -## `jobs` - -### Name - -jobs - -### description - -Table function, generating a temporary task table, which can view job information in a certain task type. - -This function is used in the from clause. 
- -#### syntax - -`jobs("type"="")` - -jobs("type"="mv")Table structure: -```sql -mysql> desc function jobs("type"="mv"); -+-------------------+------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------------------+------+------+-------+---------+-------+ -| Id | TEXT | No | false | NULL | NONE | -| Name | TEXT | No | false | NULL | NONE | -| MvId | TEXT | No | false | NULL | NONE | -| MvName | TEXT | No | false | NULL | NONE | -| MvDatabaseId | TEXT | No | false | NULL | NONE | -| MvDatabaseName | TEXT | No | false | NULL | NONE | -| ExecuteType | TEXT | No | false | NULL | NONE | -| RecurringStrategy | TEXT | No | false | NULL | NONE | -| Status | TEXT | No | false | NULL | NONE | -| CreateTime | TEXT | No | false | NULL | NONE | -+-------------------+------+------+-------+---------+-------+ -10 rows in set (0.00 sec) -``` - -* Id: job ID. -* Name: job name. -* MvId: Materialized View ID -* MvName: Materialized View Name -* MvDatabaseId: DB ID of the materialized view -* MvDatabaseName: Name of the database to which the materialized view belongs -* ExecuteType: Execution type -* RecurringStrategy: Loop strategy -* Status: Job status -* CreateTime: Task creation time - -### example - -1. View jobs in all materialized views - -```sql -mysql> select * from jobs("type"="mv"); -``` - -2. 
View job with name `inner_mtmv_75043` - -```sql -mysql> select * from jobs("type"="mv") where Name="inner_mtmv_75043"; -``` - -### keywords - - jobs diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/local.md b/docs/en/docs/sql-manual/sql-functions/table-functions/local.md deleted file mode 100644 index ab3be7d4aeff83..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/local.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -{ - "title": "local", - "language": "en" -} ---- - - - -## Local - -### Name - - - -local - - - -### Description - -Local table-valued-function(tvf), allows users to read and access local file contents on be node, just like accessing relational table. Currently supports `csv/csv_with_names/csv_with_names_and_types/json/parquet/orc` file format. - -It needs `ADMIN` privilege to use. - -#### syntax - -```sql -local( - "file_path" = "path/to/file.txt", - "backend_id" = "be_id", - "format" = "csv", - "keyn" = "valuen" - ... - ); -``` - -**parameter description** - -Related parameters for accessing local file on be node: - -- `file_path`: - - (required) The path of the file to be read, which is a relative path to the `user_files_secure_path` directory, where `user_files_secure_path` parameter [can be configured on be](../../../admin-manual/config/be-config.md). - - Can not contains `..` in path. Support using glob syntax to match multi files, such as `log/*.log` - -- `backend_id`: - - (required) The backend id where the file resides. The `backend_id` can be obtained by `show backends` command. - -File format parameters: - -- `format`: (required) Currently support `csv/csv_with_names/csv_with_names_and_types/json/parquet/orc` -- `column_separator`: (optional) default `,`. -- `line_delimiter`: (optional) default `\n`. -- `compress_type`: (optional) Currently support `UNKNOWN/PLAIN/GZ/LZO/BZ2/LZ4FRAME/DEFLATE`. Default value is `UNKNOWN`, it will automatically infer the type based on the suffix of `uri`. 
- - The following 6 parameters are used for loading in json format. For specific usage methods, please refer to: [Json Load](../../../data-operate/import/import-way/load-json-format.md) - -- `read_json_by_line`: (optional) default `"true"` -- `strip_outer_array`: (optional) default `"false"` -- `json_root`: (optional) default `""` -- `json_paths`: (optional) default `""` -- `num_as_string`: (optional) default `false` -- `fuzzy_parse`: (optional) default `false` - - The following 2 parameters are used for loading in csv format - -- `trim_double_quotes`: Boolean type (optional), the default value is `false`. True means that the outermost double quotes of each field in the csv file are trimmed. -- `skip_lines`: Integer type (optional), the default value is 0. It will skip some lines in the head of csv file. It will be disabled when the format is `csv_with_names` or `csv_with_names_and_types`. - -### Examples - -Analyze the log file on specified BE: - -```sql -mysql> select * from local( - "file_path" = "log/be.out", - "backend_id" = "10006", - "format" = "csv") - where c1 like "%start_time%" limit 10; -+--------------------------------------------------------+ -| c1 | -+--------------------------------------------------------+ -| start time: 2023年 08月 07日 星期一 23:20:32 CST | -| start time: 2023年 08月 07日 星期一 23:32:10 CST | -| start time: 2023年 08月 08日 星期二 00:20:50 CST | -| start time: 2023年 08月 08日 星期二 00:29:15 CST | -+--------------------------------------------------------+ -``` - -Read and access csv format files located at path `${DORIS_HOME}/student.csv`: - -```sql -mysql> select * from local( - "file_path" = "student.csv", - "backend_id" = "10003", - "format" = "csv"); -+------+---------+--------+ -| c1 | c2 | c3 | -+------+---------+--------+ -| 1 | alice | 18 | -| 2 | bob | 20 | -| 3 | jack | 24 | -| 4 | jackson | 19 | -| 5 | liming | d18 | -+------+---------+--------+ -``` - -Can be used with `desc function` : - -```sql -mysql> desc function local( - 
"file_path" = "student.csv", - "backend_id" = "10003", - "format" = "csv"); -+-------+------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------+------+------+-------+---------+-------+ -| c1 | TEXT | Yes | false | NULL | NONE | -| c2 | TEXT | Yes | false | NULL | NONE | -| c3 | TEXT | Yes | false | NULL | NONE | -+-------+------+------+-------+---------+-------+ -``` - -### Keywords - - local, table-valued-function, tvf - -### Best Practice - - For more detailed usage of local tvf, please refer to [S3](./s3.md) tvf, The only difference between them is the way of accessing the storage system. diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/mv_infos.md b/docs/en/docs/sql-manual/sql-functions/table-functions/mv_infos.md deleted file mode 100644 index e9c9c24c9a7d5e..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/mv_infos.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -{ - "title": "MV_INFOS", - "language": "en" -} ---- - - - -## `mv_infos` - -### Name - -mv_infos - -### description - -Table function, generating temporary tables for asynchronous materialized views, which can view information about asynchronous materialized views created in a certain database. - -This function is used in the from clause. 
- -#### syntax - -`mv_infos("database"="")` - -mv_infos() Table structure: -```sql -mysql> desc function mv_infos("database"="tpch100"); -+--------------------+---------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+--------------------+---------+------+-------+---------+-------+ -| Id | BIGINT | No | false | NULL | NONE | -| Name | TEXT | No | false | NULL | NONE | -| JobName | TEXT | No | false | NULL | NONE | -| State | TEXT | No | false | NULL | NONE | -| SchemaChangeDetail | TEXT | No | false | NULL | NONE | -| RefreshState | TEXT | No | false | NULL | NONE | -| RefreshInfo | TEXT | No | false | NULL | NONE | -| QuerySql | TEXT | No | false | NULL | NONE | -| EnvInfo | TEXT | No | false | NULL | NONE | -| MvProperties | TEXT | No | false | NULL | NONE | -| MvPartitionInfo | TEXT | No | false | NULL | NONE | -| SyncWithBaseTables | BOOLEAN | No | false | NULL | NONE | -+--------------------+---------+------+-------+---------+-------+ -12 rows in set (0.01 sec) -``` - -* Id: Materialized View ID -* Name: Materialized View Name -* JobName: The job name corresponding to the materialized view -* State: Materialized View State -* SchemaChangeDetail: The reason why the materialized view State becomes a SchemeChange -* RefreshState: Materialized view refresh status -* RefreshInfo: Refreshing strategy information defined by materialized views -* QuerySql: Query statements defined by materialized views -* EnvInfo: Environmental information during the creation of materialized views -* MvProperties: Materialized visual attributes -* MvPartitionInfo: Partition information of materialized views -* SyncWithBaseTables:Is it synchronized with the base table data? To see which partition is not synchronized, please use [SHOW PARTITIONS](../sql-reference/Show-Statements/SHOW-PARTITIONS.md) - -### example - -1. View all materialized views under db1 - -```sql -mysql> select * from mv_infos("database"="db1"); -``` - -2. 
View the materialized view named mv1 under db1 - -```sql -mysql> select * from mv_infos("database"="db1") where Name = "mv1"; -``` - -3. View the status of the materialized view named mv1 under db1 - -```sql -mysql> select State from mv_infos("database"="db1") where Name = "mv1"; -``` - -### keywords - - mv, infos diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/numbers.md b/docs/en/docs/sql-manual/sql-functions/table-functions/numbers.md deleted file mode 100644 index 723d62154aa898..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/numbers.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -{ - "title": "NUMBERS", - "language": "en" -} ---- - - - -## `numbers` - -### description - -Table function that generates a temporary table containing only one column with the column name `number` and all element values are `const_value` if `const_value` is specified, otherwise they are [0,`number`) incremented. - -#### syntax -```sql -numbers( - "number" = "n" - <, "const_value" = "x"> - ); -``` - -parameter: -- `number`: Line number. -- `const_value`: the constant value. 
- -### example -``` -mysql> select * from numbers("number" = "5"); -+--------+ -| number | -+--------+ -| 0 | -| 1 | -| 2 | -| 3 | -| 4 | -+--------+ -5 rows in set (0.11 sec) - -mysql> select * from numbers("number" = "5", "const_value" = "-123"); -+--------+ -| number | -+--------+ -| -123 | -| -123 | -| -123 | -| -123 | -| -123 | -+--------+ -5 rows in set (0.12 sec) -``` - -### keywords - - numbers, const_value \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/s3.md b/docs/en/docs/sql-manual/sql-functions/table-functions/s3.md deleted file mode 100644 index e410bf396497eb..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/s3.md +++ /dev/null @@ -1,519 +0,0 @@ ---- -{ - "title": "S3", - "language": "en" -} ---- - - - -## S3 - -### Name -S3 - -### description - -S3 table-valued-function(tvf), allows users to read and access file contents on S3-compatible object storage, just like accessing relational table. Currently supports `csv/csv_with_names/csv_with_names_and_types/json/parquet/orc` file format. - -#### syntax - -```sql -s3( - "uri" = "..", - "s3.access_key" = "...", - "s3.secret_key" = "...", - "s3.region" = "...", - "format" = "csv", - "keyn" = "valuen", - ... - ); -``` - -**parameter description** - -Each parameter in S3 tvf is a pair of `"key"="value"`. - -Related parameters for accessing S3: - -- `uri`: (required) The S3 tvf will decide whether to use the path style access method according to the `use_path_style` parameter, and the default access method is the virtual-hosted style method. -- `s3.access_key`: (required) -- `s3.secret_key`: (required) -- `s3.region`: (optional). Mandatory if the Minio has set another region. Otherwise, `us-east-1` is used by default. -- `s3.session_token`: (optional) -- `use_path_style`: (optional) default `false` . The S3 SDK uses the virtual-hosted style by default. 
However, some object storage systems may not be enabled or support virtual-hosted style access. At this time, we can add the `use_path_style` parameter to force the use of path style access method. - -> Note: URI currently supports three SCHEMA: http://, https:// and s3://. -> 1. If you use http:// or https://, you will decide whether to use the 'path style' to access s3 based on the 'use_path_style' parameter -> 2. If you use s3://, you will use the "virtual-hosted style' to access the s3, 'use_path_style' parameter is invalid. -> 3. If the uri path does not exist or the files are empty files, s3 tvf will return an empty result set. -> -> For detailed use cases, you can refer to Best Practice at the bottom. - -file format parameter: - -- `format`: (required) Currently support `csv/csv_with_names/csv_with_names_and_types/json/parquet/orc` -- `column_separator`: (optional) default `,`. -- `line_delimiter`: (optional) default `\n`. -- `compress_type`: (optional) Currently support `UNKNOWN/PLAIN/GZ/LZO/BZ2/LZ4FRAME/DEFLATE`. Default value is `UNKNOWN`, it will automatically infer the type based on the suffix of `uri`. - - The following 6 parameters are used for loading in json format. For specific usage methods, please refer to: [Json Load](../../../data-operate/import/import-way/load-json-format.md) - -- `read_json_by_line`: (optional) default `"true"` -- `strip_outer_array`: (optional) default `"false"` -- `json_root`: (optional) default `""` -- `jsonpaths`: (optional) default `""` -- `num_as_string`: (optional) default `"false"` -- `fuzzy_parse`: (optional) default `"false"` - - The following 2 parameters are used for loading in csv format - -- `trim_double_quotes`: Boolean type (optional), the default value is `false`. True means that the outermost double quotes of each field in the csv file are trimmed. -- `skip_lines`: Integer type (optional), the default value is 0. It will skip some lines in the head of csv file. 
It will be disabled when the format is `csv_with_names` or `csv_with_names_and_types`. - -other parameter: - -- `path_partition_keys`: (optional) Specifies the column names carried in the file path. For example, if the file path is /path/to/city=beijing/date="2023-07-09", you should fill in `path_partition_keys="city,date"`. It will automatically read the corresponding column names and values from the path during load process. - -### Example - -Read and access csv format files on S3-compatible object storage. - -```sql -select * from s3("uri" = "http://127.0.0.1:9312/test2/student1.csv", - "s3.access_key"= "minioadmin", - "s3.secret_key" = "minioadmin", - "format" = "csv", - "use_path_style" = "true") order by c1; -``` - -Can be used with `desc function` - -```sql -MySQL [(none)]> Desc function s3("uri" = "http://127.0.0.1:9312/test2/student1.csv", - "s3.access_key"= "minioadmin", - "s3.secret_key" = "minioadmin", - "format" = "csv", - "use_path_style" = "true"); -``` - -### Keywords - - s3, table-valued-function, tvf - -### Best Practice -Since the S3 table-valued-function does not know the table schema in advance, it will read the file first to parse out the table schema. - -**Usage of different uri schemas** -Example of http:// 、https:// - -```sql -// Note how to write your bucket of URI and set the 'use_path_style' parameter, as well as http://. -// Because of "use_path_style"="true", s3 will be accessed in 'path style'. -select * from s3( - "URI" = "https://endpoint/bucket/file/student.csv", - "s3.access_key"= "ak", - "s3.secret_key" = "sk", - "format" = "csv", - "use_path_style"="true"); - -// Note how to write your bucket of URI and set the 'use_path_style' parameter, as well as http://. -// Because of "use_path_style"="false", s3 will be accessed in 'virtual-hosted style'. 
-select * from s3( - "URI" = "https://bucket.endpoint/file/student.csv", - "s3.access_key"= "ak", - "s3.secret_key" = "sk", - "format" = "csv", - "use_path_style"="false"); - -// The OSS on Alibaba Cloud and The COS on Tencent Cloud will use 'virtual-hosted style' to access s3. -// OSS -select * from s3( - "URI" = "http://example-bucket.oss-cn-beijing.aliyuncs.com/your-folder/file.parquet", - "s3.access_key" = "ak", - "s3.secret_key" = "sk", - "region" = "oss-cn-beijing", - "format" = "parquet", - "use_path_style" = "false"); -// COS -select * from s3( - "URI" = "https://example-bucket.cos.ap-hongkong.myqcloud.com/your-folder/file.parquet", - "s3.access_key" = "ak", - "s3.secret_key" = "sk", - "region" = "ap-hongkong", - "format" = "parquet", - "use_path_style" = "false"); - -// The BOS on Baidu Cloud will use 'virtual-hosted style' compatible with the S3 protocol to access s3. -// BOS -select * from s3( - "uri" = "https://example-bucket.s3.bj.bcebos.com/your-folder/file.parquet", - "s3.access_key"= "ak", - "s3.secret_key" = "sk", - "s3.region" = "bj", - "format" = "parquet", - "use_path_style" = "false"); -``` - -Example of s3://: - -```sql -// Note how to write your bucket of URI, no need to set 'use_path_style'. -// s3 will be accessed in 'virtual-hosted style'. -select * from s3( - "URI" = "s3://bucket.endpoint/file/student.csv", - "s3.access_key"= "ak", - "s3.secret_key" = "sk", - "format" = "csv"); -``` - - -**csv format** -`csv` format: Read the file on S3 and process it as a csv file, read the first line in the file to parse out the table schema. 
The number of columns in the first line of the file `n` will be used as the number of columns in the table schema, and the column names of the table schema will be automatically named `c1, c2, ..., cn`, and the column type is set to `String` , for example: - - -The file content of student1.csv: - -``` -1,ftw,12 -2,zs,18 -3,ww,20 -``` - -use S3 tvf - -```sql -MySQL [(none)]> select * from s3("uri" = "http://127.0.0.1:9312/test2/student1.csv", --> "s3.access_key"= "minioadmin", --> "s3.secret_key" = "minioadmin", --> "format" = "csv", --> "use_path_style" = "true") order by c1; -+------+------+------+ -| c1 | c2 | c3 | -+------+------+------+ -| 1 | ftw | 12 | -| 2 | zs | 18 | -| 3 | ww | 20 | -+------+------+------+ -``` - -use `desc function S3()` to view the table schema - -```sql -MySQL [(none)]> Desc function s3("uri" = "http://127.0.0.1:9312/test2/student1.csv", --> "s3.access_key"= "minioadmin", --> "s3.secret_key" = "minioadmin", --> "format" = "csv", --> "use_path_style" = "true"); -+-------+------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------+------+------+-------+---------+-------+ -| c1 | TEXT | Yes | false | NULL | NONE | -| c2 | TEXT | Yes | false | NULL | NONE | -| c3 | TEXT | Yes | false | NULL | NONE | -+-------+------+------+-------+---------+-------+ -``` - -**csv_with_names format** -`csv_with_names` format: The first line of the file is used as the number and name of the columns of the table schema, and the column type is set to `String`, for example: - -The file content of student_with_names.csv: - -``` -id,name,age -1,ftw,12 -2,zs,18 -3,ww,20 -``` - -use S3 tvf - -```sql -MySQL [(none)]> select * from s3("uri" = "http://127.0.0.1:9312/test2/student_with_names.csv", --> "s3.access_key"= "minioadmin", --> "s3.secret_key" = "minioadmin", --> "format" = "csv_with_names", --> "use_path_style" = "true") order by id; -+------+------+------+ -| id | name | age | -+------+------+------+ -| 1 | ftw | 12 | -| 
2 | zs | 18 | -| 3 | ww | 20 | -+------+------+------+ -``` - -```sql -MySQL [(none)]> Desc function s3("uri" = "http://127.0.0.1:9312/test2/student_with_names.csv", --> "s3.access_key"= "minioadmin", --> "s3.secret_key" = "minioadmin", --> "format" = "csv_with_names", --> "use_path_style" = "true"); -+-------+------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------+------+------+-------+---------+-------+ -| id | TEXT | Yes | false | NULL | NONE | -| name | TEXT | Yes | false | NULL | NONE | -| age | TEXT | Yes | false | NULL | NONE | -+-------+------+------+-------+---------+-------+ -``` - -**csv_with_names_and_types format** - -`csv_with_names_and_types` format: Currently, it does not support parsing the column type from a csv file. When using this format, S3 tvf will parse the first line of the file as the number and name of the columns of the table schema, and set the column type to String. Meanwhile, the second line of the file is ignored. 
- -The file content of student_with_names_and_types.csv: - -``` -id,name,age -INT,STRING,INT -1,ftw,12 -2,zs,18 -3,ww,20 -``` - -use S3 tvf - -```sql -MySQL [(none)]> select * from s3("uri" = "http://127.0.0.1:9312/test2/student_with_names_and_types.csv", --> "s3.access_key"= "minioadmin", --> "s3.secret_key" = "minioadmin", --> "format" = "csv_with_names_and_types", --> "use_path_style" = "true") order by id; -+------+------+------+ -| id | name | age | -+------+------+------+ -| 1 | ftw | 12 | -| 2 | zs | 18 | -| 3 | ww | 20 | -+------+------+------+ -``` - -```sql -MySQL [(none)]> Desc function s3("uri" = "http://127.0.0.1:9312/test2/student_with_names_and_types.csv", --> "s3.access_key"= "minioadmin", --> "s3.secret_key" = "minioadmin", --> "format" = "csv_with_names_and_types", --> "use_path_style" = "true"); -+-------+------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------+------+------+-------+---------+-------+ -| id | TEXT | Yes | false | NULL | NONE | -| name | TEXT | Yes | false | NULL | NONE | -| age | TEXT | Yes | false | NULL | NONE | -+-------+------+------+-------+---------+-------+ -``` - -**json format** - -`json` format: The json format involves many optional parameters, and the meaning of each parameter can be referred to: [Json Load](../../../data-operate/import/import-way/load-json-format.md). When S3 tvf queries the json format file, it locates a json object according to the `json_root` and `jsonpaths` parameters, and uses the `key` in the object as the column name of the table schema, and sets the column type to String. 
For example: - -The file content of data.json: - -``` -[{"id":1, "name":"ftw", "age":18}] -[{"id":2, "name":"xxx", "age":17}] -[{"id":3, "name":"yyy", "age":19}] -``` - -use S3 tvf: - -```sql -MySQL [(none)]> select * from s3( - "URI" = "http://127.0.0.1:9312/test2/data.json", - "s3.access_key"= "minioadmin", - "s3.secret_key" = "minioadmin", - "format" = "json", - "strip_outer_array" = "true", - "read_json_by_line" = "true", - "use_path_style"="true"); -+------+------+------+ -| id | name | age | -+------+------+------+ -| 1 | ftw | 18 | -| 2 | xxx | 17 | -| 3 | yyy | 19 | -+------+------+------+ - -MySQL [(none)]> select * from s3( - "URI" = "http://127.0.0.1:9312/test2/data.json", - "s3.access_key"= "minioadmin", - "s3.secret_key" = "minioadmin", - "format" = "json", - "strip_outer_array" = "true", - "jsonpaths" = "[\"$.id\", \"$.age\"]", - "use_path_style"="true"); -+------+------+ -| id | age | -+------+------+ -| 1 | 18 | -| 2 | 17 | -| 3 | 19 | -+------+------+ -``` - -**parquet format** - -`parquet` format: S3 tvf supports parsing the column names and column types of the table schema from the parquet file. Example: - -```sql -MySQL [(none)]> select * from s3( - "URI" = "http://127.0.0.1:9312/test2/test.snappy.parquet", - "s3.access_key"= "minioadmin", - "s3.secret_key" = "minioadmin", - "format" = "parquet", - "use_path_style"="true") limit 5; -+-----------+------------------------------------------+----------------+----------+-------------------------+--------+-------------+---------------+---------------------+ -| p_partkey | p_name | p_mfgr | p_brand | p_type | p_size | p_container | p_retailprice | p_comment | -+-----------+------------------------------------------+----------------+----------+-------------------------+--------+-------------+---------------+---------------------+ -| 1 | goldenrod lavender spring chocolate lace | Manufacturer#1 | Brand#13 | PROMO BURNISHED COPPER | 7 | JUMBO PKG | 901 | ly. 
slyly ironi | -| 2 | blush thistle blue yellow saddle | Manufacturer#1 | Brand#13 | LARGE BRUSHED BRASS | 1 | LG CASE | 902 | lar accounts amo | -| 3 | spring green yellow purple cornsilk | Manufacturer#4 | Brand#42 | STANDARD POLISHED BRASS | 21 | WRAP CASE | 903 | egular deposits hag | -| 4 | cornflower chocolate smoke green pink | Manufacturer#3 | Brand#34 | SMALL PLATED BRASS | 14 | MED DRUM | 904 | p furiously r | -| 5 | forest brown coral puff cream | Manufacturer#3 | Brand#32 | STANDARD POLISHED TIN | 15 | SM PKG | 905 | wake carefully | -+-----------+------------------------------------------+----------------+----------+-------------------------+--------+-------------+---------------+---------------------+ -``` - -```sql -MySQL [(none)]> desc function s3( - "URI" = "http://127.0.0.1:9312/test2/test.snappy.parquet", - "s3.access_key"= "minioadmin", - "s3.secret_key" = "minioadmin", - "format" = "parquet", - "use_path_style"="true"); -+---------------+--------------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+---------------+--------------+------+-------+---------+-------+ -| p_partkey | INT | Yes | false | NULL | NONE | -| p_name | TEXT | Yes | false | NULL | NONE | -| p_mfgr | TEXT | Yes | false | NULL | NONE | -| p_brand | TEXT | Yes | false | NULL | NONE | -| p_type | TEXT | Yes | false | NULL | NONE | -| p_size | INT | Yes | false | NULL | NONE | -| p_container | TEXT | Yes | false | NULL | NONE | -| p_retailprice | DECIMAL(9,0) | Yes | false | NULL | NONE | -| p_comment | TEXT | Yes | false | NULL | NONE | -+---------------+--------------+------+-------+---------+-------+ -``` - -**orc format** - -`orc` format: Same as `parquet` format, set `format` parameter to orc. 
- -```sql -MySQL [(none)]> select * from s3( - "URI" = "http://127.0.0.1:9312/test2/test.snappy.orc", - "s3.access_key"= "minioadmin", - "s3.secret_key" = "minioadmin", - "format" = "orc", - "use_path_style"="true") limit 5; -+-----------+------------------------------------------+----------------+----------+-------------------------+--------+-------------+---------------+---------------------+ -| p_partkey | p_name | p_mfgr | p_brand | p_type | p_size | p_container | p_retailprice | p_comment | -+-----------+------------------------------------------+----------------+----------+-------------------------+--------+-------------+---------------+---------------------+ -| 1 | goldenrod lavender spring chocolate lace | Manufacturer#1 | Brand#13 | PROMO BURNISHED COPPER | 7 | JUMBO PKG | 901 | ly. slyly ironi | -| 2 | blush thistle blue yellow saddle | Manufacturer#1 | Brand#13 | LARGE BRUSHED BRASS | 1 | LG CASE | 902 | lar accounts amo | -| 3 | spring green yellow purple cornsilk | Manufacturer#4 | Brand#42 | STANDARD POLISHED BRASS | 21 | WRAP CASE | 903 | egular deposits hag | -| 4 | cornflower chocolate smoke green pink | Manufacturer#3 | Brand#34 | SMALL PLATED BRASS | 14 | MED DRUM | 904 | p furiously r | -| 5 | forest brown coral puff cream | Manufacturer#3 | Brand#32 | STANDARD POLISHED TIN | 15 | SM PKG | 905 | wake carefully | -+-----------+------------------------------------------+----------------+----------+-------------------------+--------+-------------+---------------+---------------------+ -``` - -**avro format** - -`avro` format: S3 tvf supports parsing the column names and column types of the table schema from the avro file. 
Example: - -```sql -select * from s3( - "uri" = "http://127.0.0.1:9312/test2/person.avro", - "ACCESS_KEY" = "ak", - "SECRET_KEY" = "sk", - "FORMAT" = "avro"); -+--------+--------------+-------------+-----------------+ -| name | boolean_type | double_type | long_type | -+--------+--------------+-------------+-----------------+ -| Alyssa | 1 | 10.0012 | 100000000221133 | -| Ben | 0 | 5555.999 | 4009990000 | -| lisi | 0 | 5992225.999 | 9099933330 | -+--------+--------------+-------------+-----------------+ -``` - -**uri contains wildcards** - -uri can use wildcards to read multiple files. Note: If wildcards are used, the format of each file must be consistent (especially csv/csv_with_names/csv_with_names_and_types count as different formats), S3 tvf uses the first file to parse out the table schema. For example: - -The following two csv files: - -``` -// file1.csv -1,aaa,18 -2,qqq,20 -3,qwe,19 - -// file2.csv -5,cyx,19 -6,ftw,21 -``` - -You can use wildcards on the uri to query. - -```sql -MySQL [(none)]> select * from s3( - "URI" = "http://127.0.0.1:9312/test2/file*.csv", - "s3.access_key"= "minioadmin", - "s3.secret_key" = "minioadmin", - "format" = "csv", - "use_path_style"="true"); -+------+------+------+ -| c1 | c2 | c3 | -+------+------+------+ -| 1 | aaa | 18 | -| 2 | qqq | 20 | -| 3 | qwe | 19 | -| 5 | cyx | 19 | -| 6 | ftw | 21 | -+------+------+------+ -``` - -**Using `S3` tvf with `insert into` and `cast`** - -```sql -// Create doris internal table -CREATE TABLE IF NOT EXISTS ${testTable} - ( - id int, - name varchar(50), - age int - ) - COMMENT "my first table" - DISTRIBUTED BY HASH(id) BUCKETS 32 - PROPERTIES("replication_num" = "1"); - -// Insert data using S3 -insert into ${testTable} (id,name,age) -select cast (id as INT) as id, name, cast (age as INT) as age -from s3( - "uri" = "${uri}", - "s3.access_key"= "${ak}", - "s3.secret_key" = "${sk}", - "format" = "${format}", - "strip_outer_array" = "true", - "read_json_by_line" = "true", - "use_path_style" 
= "true"); -``` diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/tasks.md b/docs/en/docs/sql-manual/sql-functions/table-functions/tasks.md deleted file mode 100644 index 4cf497c89156ce..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/tasks.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -{ - "title": "TASKS", - "language": "en" -} ---- - - - -## `tasks` - -### Name - - - -tasks - - - -### description - -Table function, generates a temporary table of tasks, which allows you to view the information of tasks generated by jobs in the current Doris cluster. - -This function is used in the FROM clause. - -#### syntax - -`tasks("type"="insert");` -**parameter description** - -| parameter | description | type | required | -|:----------|:------------|:-------|:---------| -| type | job type | string | yes | - -the **type** supported types -- insert: insert into type job -- mv: materilized view type job - -##### Insert tasks - -The table schema of `tasks("type"="insert");` tvf: - -``` -mysql> desc function tasks("type"="insert"); -+---------------+------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+---------------+------+------+-------+---------+-------+ -| TaskId | TEXT | No | false | NULL | NONE | -| JobId | TEXT | No | false | NULL | NONE | -| Label | TEXT | No | false | NULL | NONE | -| Status | TEXT | No | false | NULL | NONE | -| EtlInfo | TEXT | No | false | NULL | NONE | -| TaskInfo | TEXT | No | false | NULL | NONE | -| ErrorMsg | TEXT | No | false | NULL | NONE | -| CreateTimeMs | TEXT | No | false | NULL | NONE | -| FinishTimeMs | TEXT | No | false | NULL | NONE | -| TrackingUrl | TEXT | No | false | NULL | NONE | -| LoadStatistic | TEXT | No | false | NULL | NONE | -| User | TEXT | No | false | NULL | NONE | -+---------------+------+------+-------+---------+-------+ -12 rows in set (0.01 sec) -``` -##### MV Tasks -```sql -mysql> desc function tasks("type"="mv"); 
-+-----------------------+------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-----------------------+------+------+-------+---------+-------+ -| TaskId | TEXT | No | false | NULL | NONE | -| JobId | TEXT | No | false | NULL | NONE | -| JobName | TEXT | No | false | NULL | NONE | -| MvId | TEXT | No | false | NULL | NONE | -| MvName | TEXT | No | false | NULL | NONE | -| MvDatabaseId | TEXT | No | false | NULL | NONE | -| MvDatabaseName | TEXT | No | false | NULL | NONE | -| Status | TEXT | No | false | NULL | NONE | -| ErrorMsg | TEXT | No | false | NULL | NONE | -| CreateTime | TEXT | No | false | NULL | NONE | -| StartTime | TEXT | No | false | NULL | NONE | -| FinishTime | TEXT | No | false | NULL | NONE | -| DurationMs | TEXT | No | false | NULL | NONE | -| TaskContext | TEXT | No | false | NULL | NONE | -| RefreshMode | TEXT | No | false | NULL | NONE | -| NeedRefreshPartitions | TEXT | No | false | NULL | NONE | -| CompletedPartitions | TEXT | No | false | NULL | NONE | -| Progress | TEXT | No | false | NULL | NONE | -+-----------------------+------+------+-------+---------+-------+ -18 rows in set (0.00 sec) -``` - -* TaskId: task id -* JobId: job id -* JobName: job Name -* MvId: Materialized View ID -* MvName: Materialized View Name -* MvDatabaseId: DB ID of the materialized view -* MvDatabaseName: Name of the database to which the materialized view belongs -* Status: task status -* ErrorMsg: Task failure information -* CreateTime: Task creation time -* StartTime: Task start running time -* FinishTime: Task End Run Time -* DurationMs: Task runtime -* TaskContext: Task running parameters -* RefreshMode: refresh mode -* NeedRefreshPartitions: The partition information that needs to be refreshed for this task -* CompletedPartitions: The partition information that has been refreshed for this task -* Progress: Task running progress -### example -#### Insert Tasls -``` -mysql> select * from tasks("type"="insert") limit 1 
\G -*************************** 1. row *************************** - TaskId: 667704038678903 - JobId: 10069 - Label: 10069_667704038678903 - Status: FINISHED - EtlInfo: \N - TaskInfo: cluster:N/A; timeout(s):14400; max_filter_ratio:0.0; priority:NORMAL - ErrorMsg: \N - CreateTimeMs: 2023-12-08 16:46:57 - FinishTimeMs: 2023-12-08 16:46:57 - TrackingUrl: -LoadStatistic: {"Unfinished backends":{},"ScannedRows":0,"TaskNumber":0,"LoadBytes":0,"All backends":{},"FileNumber":0,"FileSize":0} - User: root -1 row in set (0.05 sec) - -``` -#### MV Tasks - -1. View tasks for all materialized views - -```sql -mysql> select * from tasks("type"="mv"); -``` - -2. View all tasks with jobName `inner_mtmv_75043` - -```sql -mysql> select * from tasks("type"="mv") where JobName="inner_mtmv_75043"; -``` - - -### keywords - - tasks, job, insert, mv, materilized view \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/table-functions/workload-group.md b/docs/en/docs/sql-manual/sql-functions/table-functions/workload-group.md deleted file mode 100644 index 596cb07b527aba..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/table-functions/workload-group.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -{ - "title": "WORKLOAD_GROUPS", - "language": "en" -} ---- - - - -## `workload_groups` - -### Name - - - -workload_groups - - - -### description - -Table-Value-Function, generate a temporary table named `workload_groups`. This tvf is used to view information about workload groups for which current user has permission. - -This function is used in `FROM` clauses. 
- -#### syntax - -`workload_groups()` - -The table schema of `workload_groups()` tvf: -``` -mysql> desc function workload_groups(); -+-------+-------------+------+-------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------+-------------+------+-------+---------+-------+ -| Id | BIGINT | No | false | NULL | NONE | -| Name | STRING | No | false | NULL | NONE | -| Item | STRING | No | false | NULL | NONE | -| Value | STRING | No | false | NULL | NONE | -+-------+-------------+------+-------+---------+-------+ -``` - -### example -``` -mysql> select * from workload_groups()\G -+-------+--------+--------------+-------+ -| Id | Name | Item | Value | -+-------+--------+--------------+-------+ -| 11001 | normal | memory_limit | 100% | -| 11001 | normal | cpu_share | 10 | -+-------+--------+--------------+-------+ -``` - -### keywords - - workload_groups \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/width-bucket.md b/docs/en/docs/sql-manual/sql-functions/width-bucket.md deleted file mode 100644 index 56a2e7ee8fd1e0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/width-bucket.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -{ - "title": "WIDTH_BUCKET", - "language": "en" -} ---- - - - -## width_bucket - -### Description - -Constructs equi-width histograms, in which the histogram range is divided into intervals of identical size, and returns the bucket number into which the value of an expression falls, after it has been evaluated. The function returns an integer value or null (if any input is null). - -#### Syntax - -`INT width_bucket(Expr expr, T min_value, T max_value, INT num_buckets)` - -#### Arguments -`expr` - -The expression for which the histogram is created. This expression must evaluate to a numeric value or to a value that can be implicitly converted to a numeric value. - -The value must be within the range of `-(2^53 - 1)` to `2^53 - 1` (inclusive). 
- -`min_value` and `max_value` - -The low and high end points of the acceptable range for the expression. The end points must also evaluate to numeric values and not be equal. - -The low and high end points must be within the range of `-(2^53 - 1)` to `2^53 - 1` (inclusive). In addition, the difference between these points must be less than `2^53` (i.e. `abs(max_value - min_value) < 2^53)`. - -`num_buckets` - -The desired number of buckets; must be a positive integer value. A value from the expression is assigned to each bucket, and the function then returns the corresponding bucket number. - - -#### Returned value -It returns the bucket number into which the value of an expression falls. - -When an expression falls outside the range, the function returns: - -`0` if the expression is less than `min_value`. - -`num_buckets + 1` if the expression is greater than or equal to `max_value`. - -`null` if any input is `null`. - -### example - -```sql -DROP TABLE IF EXISTS width_bucket_test; - -CREATE TABLE IF NOT EXISTS width_bucket_test ( - `k1` int NULL COMMENT "", - `v1` date NULL COMMENT "", - `v2` double NULL COMMENT "", - `v3` bigint NULL COMMENT "" - ) ENGINE=OLAP - DUPLICATE KEY(`k1`) - DISTRIBUTED BY HASH(`k1`) BUCKETS 1 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "storage_format" = "V2" - ); - -INSERT INTO width_bucket_test VALUES (1, "2022-11-18", 290000.00, 290000), - (2, "2023-11-18", 320000.00, 320000), - (3, "2024-11-18", 399999.99, 399999), - (4, "2025-11-18", 400000.00, 400000), - (5, "2026-11-18", 470000.00, 470000), - (6, "2027-11-18", 510000.00, 510000), - (7, "2028-11-18", 610000.00, 610000), - (8, null, null, null); - -SELECT * FROM width_bucket_test ORDER BY k1; - -+------+------------+-----------+--------+ -| k1 | v1 | v2 | v3 | -+------+------------+-----------+--------+ -| 1 | 2022-11-18 | 290000 | 290000 | -| 2 | 2023-11-18 | 320000 | 320000 | -| 3 | 2024-11-18 | 399999.99 | 399999 | -| 4 | 2025-11-18 | 400000 | 
400000 | -| 5 | 2026-11-18 | 470000 | 470000 | -| 6 | 2027-11-18 | 510000 | 510000 | -| 7 | 2028-11-18 | 610000 | 610000 | -| 8 | NULL | NULL | NULL | -+------+------------+-----------+--------+ - -SELECT k1, v1, v2, v3, width_bucket(v1, date('2023-11-18'), date('2027-11-18'), 4) AS w FROM width_bucket_test ORDER BY k1; - -+------+------------+-----------+--------+------+ -| k1 | v1 | v2 | v3 | w | -+------+------------+-----------+--------+------+ -| 1 | 2022-11-18 | 290000 | 290000 | 0 | -| 2 | 2023-11-18 | 320000 | 320000 | 1 | -| 3 | 2024-11-18 | 399999.99 | 399999 | 2 | -| 4 | 2025-11-18 | 400000 | 400000 | 3 | -| 5 | 2026-11-18 | 470000 | 470000 | 4 | -| 6 | 2027-11-18 | 510000 | 510000 | 5 | -| 7 | 2028-11-18 | 610000 | 610000 | 5 | -| 8 | NULL | NULL | NULL | NULL | -+------+------------+-----------+--------+------+ - -SELECT k1, v1, v2, v3, width_bucket(v2, 200000, 600000, 4) AS w FROM width_bucket_test ORDER BY k1; - -+------+------------+-----------+--------+------+ -| k1 | v1 | v2 | v3 | w | -+------+------------+-----------+--------+------+ -| 1 | 2022-11-18 | 290000 | 290000 | 1 | -| 2 | 2023-11-18 | 320000 | 320000 | 2 | -| 3 | 2024-11-18 | 399999.99 | 399999 | 2 | -| 4 | 2025-11-18 | 400000 | 400000 | 3 | -| 5 | 2026-11-18 | 470000 | 470000 | 3 | -| 6 | 2027-11-18 | 510000 | 510000 | 4 | -| 7 | 2028-11-18 | 610000 | 610000 | 5 | -| 8 | NULL | NULL | NULL | NULL | -+------+------------+-----------+--------+------+ - -SELECT k1, v1, v2, v3, width_bucket(v3, 200000, 600000, 4) AS w FROM width_bucket_test ORDER BY k1; - -+------+------------+-----------+--------+------+ -| k1 | v1 | v2 | v3 | w | -+------+------------+-----------+--------+------+ -| 1 | 2022-11-18 | 290000 | 290000 | 1 | -| 2 | 2023-11-18 | 320000 | 320000 | 2 | -| 3 | 2024-11-18 | 399999.99 | 399999 | 2 | -| 4 | 2025-11-18 | 400000 | 400000 | 3 | -| 5 | 2026-11-18 | 470000 | 470000 | 3 | -| 6 | 2027-11-18 | 510000 | 510000 | 4 | -| 7 | 2028-11-18 | 610000 | 610000 | 5 | -| 8 | NULL | 
NULL | NULL | NULL | -+------+------------+-----------+--------+------+ - -``` -### keywords -WIDTH_BUCKET \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-avg.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-avg.md deleted file mode 100644 index 22a48eee387938..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-avg.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_AVG", - "language": "en" -} ---- - - - -## WINDOW FUNCTION AVG -### description - -Calculate the mean of the data within the window - -```sql -AVG([ALL] *expression*) [OVER (*analytic_clause*)] -``` - -### example - -Calculate the x-average of the current row and the rows before and after it - -```sql -select x, property, -avg(x) over -( -partition by property -order by x -rows between 1 preceding and 1 following -) as 'moving average' -from int_t where property in ('odd','even'); - - | x | property | moving average | - |----|----------|----------------| - | 2 | even | 3 | - | 4 | even | 4 | - | 6 | even | 6 | - | 8 | even | 8 | - | 10 | even | 9 | - | 1 | odd | 2 | - | 3 | odd | 3 | - | 5 | odd | 5 | - | 7 | odd | 7 | - | 9 | odd | 8 | -``` - -### keywords - - WINDOW,FUNCTION,AVG diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-count.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-count.md deleted file mode 100644 index a024c54a4471e9..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-count.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_COUNT", - "language": "en" -} ---- - - - -## WINDOW FUNCTION COUNT -### description - -Count the number of occurrences of data in the window - -```sql -COUNT(expression) [OVER (analytic_clause)] -``` - -### example - -Count the number of occurrences of x from the current row to the first row. 
- -```sql -select x, property, -count(x) over -( -partition by property -order by x -rows between unbounded preceding and current row -) as 'cumulative total' -from int_t where property in ('odd','even'); - - | x | property | cumulative count | - |----|----------|------------------| - | 2 | even | 1 | - | 4 | even | 2 | - | 6 | even | 3 | - | 8 | even | 4 | - | 10 | even | 5 | - | 1 | odd | 1 | - | 3 | odd | 2 | - | 5 | odd | 3 | - | 7 | odd | 4 | - | 9 | odd | 5 | -``` - -### keywords - - WINDOW,FUNCTION,COUNT diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-cume-dist.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-cume-dist.md deleted file mode 100644 index 434428875c7148..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-cume-dist.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_CUME_DIST", - "language": "en" -} ---- - - - -## WINDOW FUNCTION CUME_DIST -### description - -CUME_DIST (Cumulative Distribution) is a window function commonly used to calculate the relative ranking of the current row value within a sorted result set. It returns the percentage ranking of the current row value in the result set, i.e., the ratio of the number of rows less than or equal to the current row value to the total number of rows in the result set after sorting. - -```sql -CUME_DIST() OVER(partition_by_clause order_by_clause) -``` - -### example -Suppose there is a table named sales containing sales data, including salesperson name (sales_person), sales amount (sales_amount), and sales date (sales_date). We want to calculate the cumulative percentage of sales amount for each salesperson on each sales date compared to the total sales amount for that day. 
-```sql -SELECT - sales_person, - sales_date, - sales_amount, - CUME_DIST() OVER (PARTITION BY sales_date ORDER BY sales_amount ASC) AS cumulative_sales_percentage -FROM - sales; -``` - -Suppose the data in the sales table is as follows: - -```sql -+------+--------------+------------+--------------+ -| id | sales_person | sales_date | sales_amount | -+------+--------------+------------+--------------+ -| 1 | Alice | 2024-02-01 | 2000 | -| 2 | Bob | 2024-02-01 | 1500 | -| 3 | Alice | 2024-02-02 | 1800 | -| 4 | Bob | 2024-02-02 | 1200 | -| 5 | Alice | 2024-02-03 | 2200 | -| 6 | Bob | 2024-02-03 | 1900 | -| 7 | Tom | 2024-02-03 | 2000 | -| 8 | Jerry | 2024-02-03 | 2000 | -+------+--------------+------------+--------------+ -``` - -After executing the above SQL query, the result will display the sales amount for each salesperson on each sales date and their cumulative percentage ranking for that sales date. -```sql -+--------------+------------+--------------+-----------------------------+ -| sales_person | sales_date | sales_amount | cumulative_sales_percentage | -+--------------+------------+--------------+-----------------------------+ -| Bob | 2024-02-01 | 1500 | 0.5 | -| Alice | 2024-02-01 | 2000 | 1 | -| Bob | 2024-02-02 | 1200 | 0.5 | -| Alice | 2024-02-02 | 1800 | 1 | -| Bob | 2024-02-03 | 1900 | 0.25 | -| Tom | 2024-02-03 | 2000 | 0.75 | -| Jerry | 2024-02-03 | 2000 | 0.75 | -| Alice | 2024-02-03 | 2200 | 1 | -+--------------+------------+--------------+-----------------------------+ -``` -In this example, the CUME_DIST() function sorts the sales amount for each sales date and then calculates the cumulative percentage of sales amount for each salesperson on that date compared to the total sales amount for that day. Since we use PARTITION BY sales_date, the calculation is done within each sales date, and the sales amount for salespersons on different dates is calculated separately. 
-### keywords - - WINDOW,FUNCTION,CUME_DIST - diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-dense-rank.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-dense-rank.md deleted file mode 100644 index f7e3935954852b..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-dense-rank.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_DENSE_RANK", - "language": "en" -} ---- - - - -## WINDOW FUNCTION DENSE_RANK -### description - -The DENSE_RANK() function is used to represent rankings. Unlike RANK(), DENSE_RANK() does not have vacancies. For example, if there are two parallel 1s, the third number of DENSE_RANK() is still 2, and the third number of RANK() is 3. - -```sql -DENSE_RANK() OVER(partition_by_clause order_by_clause) -``` - -### example - -Group by the property column to rank column x: - -```sql - select x, y, dense_rank() over(partition by x order by y) as rank from int_t; - - | x | y | rank | - |----|------|----------| - | 1 | 1 | 1 | - | 1 | 2 | 2 | - | 1 | 2 | 2 | - | 2 | 1 | 1 | - | 2 | 2 | 2 | - | 2 | 3 | 3 | - | 3 | 1 | 1 | - | 3 | 1 | 1 | - | 3 | 2 | 2 | -``` - -### keywords - - WINDOW,FUNCTION,DENSE_RANK diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-first-value.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-first-value.md deleted file mode 100644 index a8e3f88e150459..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-first-value.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_FIRST_VALUE", - "language": "en" -} ---- - - - -## WINDOW FUNCTION FIRST_VALUE -### description - -FIRST_VALUE() returns the first value in the window's range. 
- -```sql -FIRST_VALUE(expr) OVER(partition_by_clause order_by_clause [window_clause]) -``` - -### example - - -We have the following data - -```sql - select name, country, greeting from mail_merge; - - | name | country | greeting | - |---------|---------|--------------| - | Pete | USA | Hello | - | John | USA | Hi | - | Boris | Germany | Guten tag | - | Michael | Germany | Guten morgen | - | Bjorn | Sweden | Hej | - | Mats | Sweden | Tja | -``` - -Use FIRST_VALUE() to group by country and return the value of the first greeting in each group: - -```sql -select country, name, -first_value(greeting) -over (partition by country order by name, greeting) as greeting from mail_merge; - -| country | name | greeting | -|---------|---------|-----------| -| Germany | Boris | Guten tag | -| Germany | Michael | Guten tag | -| Sweden | Bjorn | Hej | -| Sweden | Mats | Hej | -| USA | John | Hi | -| USA | Pete | Hi | -``` - -### keywords - - WINDOW,FUNCTION,FIRST_VALUE diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-lag.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-lag.md deleted file mode 100644 index 954512e29c6edb..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-lag.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_LAG", - "language": "en" -} ---- - - - -## WINDOW FUNCTION LAG -### description - -The LAG() method is used to calculate the value of the current line several lines ahead. 
- -```sql -LAG(expr, offset, default) OVER (partition_by_clause order_by_clause) -``` - -### example - -Calculate the previous day's closing price - -```sql -select stock_symbol, closing_date, closing_price, -lag(closing_price,1, 0) over (partition by stock_symbol order by closing_date) as "yesterday closing" -from stock_ticker -order by closing_date; - -| stock_symbol | closing_date | closing_price | yesterday closing | -|--------------|---------------------|---------------|-------------------| -| JDR | 2014-09-13 00:00:00 | 12.86 | 0 | -| JDR | 2014-09-14 00:00:00 | 12.89 | 12.86 | -| JDR | 2014-09-15 00:00:00 | 12.94 | 12.89 | -| JDR | 2014-09-16 00:00:00 | 12.55 | 12.94 | -| JDR | 2014-09-17 00:00:00 | 14.03 | 12.55 | -| JDR | 2014-09-18 00:00:00 | 14.75 | 14.03 | -| JDR | 2014-09-19 00:00:00 | 13.98 | 14.75 | -``` - -### keywords - - WINDOW,FUNCTION,LAG diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-last-value.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-last-value.md deleted file mode 100644 index f2cf379c78e7af..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-last-value.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_LAST_VALUE", - "language": "en" -} ---- - - - -## WINDOW FUNCTION LAST_VALUE -### description - -LAST_VALUE() returns the last value in the window range. Opposite of FIRST_VALUE() . 
- -```sql -LAST_VALUE(expr) OVER(partition_by_clause order_by_clause [window_clause]) -``` - -### example - -Using the data from the FIRST_VALUE() example: - -```sql -select country, name, -last_value(greeting) -over (partition by country order by name, greeting) as greeting -from mail_merge; - -| country | name | greeting | -|---------|---------|--------------| -| Germany | Boris | Guten morgen | -| Germany | Michael | Guten morgen | -| Sweden | Bjorn | Tja | -| Sweden | Mats | Tja | -| USA | John | Hello | -| USA | Pete | Hello | -``` - -### keywords - - WINDOW,FUNCTION,LAST_VALUE diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-lead.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-lead.md deleted file mode 100644 index a0e431957fe4f1..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-lead.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_LEAD", - "language": "en" -} ---- - - - -## WINDOW FUNCTION LEAD -### description - -The LEAD() method is used to calculate the value of the current line several lines backwards. - -```sql -LEAD(expr, offset, default) OVER (partition_by_clause order_by_clause) -``` - -### example - -Calculate the trend of the closing price of the second day compared with the closing price of the day, that is, the closing price of the second day is higher or lower than that of the day. 
- -```sql -select stock_symbol, closing_date, closing_price, -case -(lead(closing_price,1, 0) -over (partition by stock_symbol order by closing_date)-closing_price) > 0 -when true then "higher" -when false then "flat or lower" -end as "trending" -from stock_ticker -order by closing_date; - -| stock_symbol | closing_date | closing_price | trending | -|--------------|---------------------|---------------|---------------| -| JDR | 2014-09-13 00:00:00 | 12.86 | higher | -| JDR | 2014-09-14 00:00:00 | 12.89 | higher | -| JDR | 2014-09-15 00:00:00 | 12.94 | flat or lower | -| JDR | 2014-09-16 00:00:00 | 12.55 | higher | -| JDR | 2014-09-17 00:00:00 | 14.03 | higher | -| JDR | 2014-09-18 00:00:00 | 14.75 | flat or lower | -| JDR | 2014-09-19 00:00:00 | 13.98 | flat or lower | -``` - -### keywords - - WINDOW,FUNCTION,LEAD diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-max.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-max.md deleted file mode 100644 index fef8646da42adf..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-max.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_MAX", - "language": "en" -} ---- - - - -## WINDOW FUNCTION MAX -### description - -The LEAD() method is used to calculate the maximum value within the window. 
- -```sql -MAX([DISTINCT | ALL] expression) [OVER (analytic_clause)] -``` - -### example - -Calculate the maximum value from the first row to the row after the current row - -```sql -select x, property, -max(x) over -( -order by property, x -rows between unbounded preceding and 1 following -) as 'local maximum' -from int_t where property in ('prime','square'); - -| x | property | local maximum | -|---|----------|---------------| -| 2 | prime | 3 | -| 3 | prime | 5 | -| 5 | prime | 7 | -| 7 | prime | 7 | -| 1 | square | 7 | -| 4 | square | 9 | -| 9 | square | 9 | -``` - -### keywords - - WINDOW,FUNCTION,MAX diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-min.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-min.md deleted file mode 100644 index d9e1923524af74..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-min.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_MIN", - "language": "en" -} ---- - - - -## WINDOW FUNCTION MIN -### description - -The LEAD() method is used to calculate the minimum value within the window. 
- -```sql -MAX([DISTINCT | ALL] expression) [OVER (analytic_clause)] -``` - -### example - -Calculate the minimum value from the first row to the row after the current row - -```sql -select x, property, -min(x) over -( -order by property, x desc -rows between unbounded preceding and 1 following -) as 'local minimum' -from int_t where property in ('prime','square'); -| x | property | local minimum | -|---|----------|---------------| -| 7 | prime | 5 | -| 5 | prime | 3 | -| 3 | prime | 2 | -| 2 | prime | 2 | -| 9 | square | 2 | -| 4 | square | 1 | -| 1 | square | 1 | -``` - -### keywords - - WINDOW,FUNCTION,MIN diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-ntile.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-ntile.md deleted file mode 100644 index 13d576cb54c9f6..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-ntile.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_NTILE", - "language": "en" -} ---- - - - -## WINDOW FUNCTION NTILE -### description - -For NTILE(n), this function will divides rows in a sorted partition into a specific number of groups(in this case, n buckets). Each group is assigned a bucket number starting at one. For the case that cannot be distributed evenly, rows are preferentially allocated to the bucket with the smaller number. The number of rows in all buckets cannot differ by more than 1. For now, n must be constant positive integer. 
- -```sql -NTILE(n) OVER(partition_by_clause order_by_clause) -``` - -### example - -```sql -select x, y, ntile(2) over(partition by x order by y) as ntile from int_t; - -| x | y | rank | -|---|------|----------| -| 1 | 1 | 1 | -| 1 | 2 | 1 | -| 1 | 2 | 2 | -| 2 | 1 | 1 | -| 2 | 2 | 1 | -| 2 | 3 | 2 | -| 3 | 1 | 1 | -| 3 | 1 | 1 | -| 3 | 2 | 2 | -``` - -### keywords - - WINDOW,FUNCTION,NTILE diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-percent-rank.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-percent-rank.md deleted file mode 100644 index 74e43fd37906a9..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-percent-rank.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_PERCENT_RANK", - "language": "en" -} ---- - - - -## WINDOW FUNCTION PERCENT_RANK -### description - -The PERCENT_RANK() is a window function that calculates the percentile rank of a row within a partition or result set. - -The following shows the syntax of the PERCENT_RANK() function: - -```sql -PERCENT_RANK() OVER ( - PARTITION BY partition_expression - ORDER BY - sort_expression [ASC | DESC] -) -``` - -The PERCENT_RANK() function returns a number that ranges from zero to one. - -For a specified row, PERCENT_RANK() calculates the rank of that row minus one, divided by 1 less than the number of rows in the evaluated partition or query result set: - -```sql -(rank - 1) / (total_rows - 1) -``` - -In this formula, rank is the rank of a specified row and total_rows is the number of rows being evaluated. - -The PERCENT_RANK() function always returns zero for the first row in a partition or result set. The repeated column values will receive the same PERCENT_RANK() value. - -Similar to other window functions, the PARTITION BY clause distributes the rows into partitions and the ORDER BY clause specifies the logical order of rows in each partition. 
The PERCENT_RANK() function is calculated for each ordered partition independently. - -Both PARTITION BY and ORDER BY clauses are optional. However, the PERCENT_RANK() is an order-sensitive function, therefore, you should always use the ORDER BY clause. - -### example - -```sql -// create table -CREATE TABLE test_percent_rank ( - productLine VARCHAR, - orderYear INT, - orderValue DOUBLE, - percentile_rank DOUBLE -) ENGINE=OLAP -DISTRIBUTED BY HASH(`orderYear`) BUCKETS 4 -PROPERTIES ( -"replication_allocation" = "tag.location.default: 1" -); - -// insert data into table -INSERT INTO test_percent_rank (productLine, orderYear, orderValue, percentile_rank) VALUES -('Motorcycles', 2003, 2440.50, 0.00), -('Trains', 2003, 2770.95, 0.17), -('Trucks and Buses', 2003, 3284.28, 0.33), -('Vintage Cars', 2003, 4080.00, 0.50), -('Planes', 2003, 4825.44, 0.67), -('Ships', 2003, 5072.71, 0.83), -('Classic Cars', 2003, 5571.80, 1.00), -('Motorcycles', 2004, 2598.77, 0.00), -('Vintage Cars', 2004, 2819.28, 0.17), -('Planes', 2004, 2857.35, 0.33), -('Ships', 2004, 4301.15, 0.50), -('Trucks and Buses', 2004, 4615.64, 0.67), -('Trains', 2004, 4646.88, 0.83), -('Classic Cars', 2004, 8124.98, 1.00), -('Ships', 2005, 1603.20, 0.00), -('Motorcycles', 2005, 3774.00, 0.17), -('Planes', 2005, 4018.00, 0.50), -('Vintage Cars', 2005, 5346.50, 0.67), -('Classic Cars', 2005, 5971.35, 0.83), -('Trucks and Buses', 2005, 6295.03, 1.00); - -// query -SELECT - productLine, - orderYear, - orderValue, - ROUND( - PERCENT_RANK() - OVER ( - PARTITION BY orderYear - ORDER BY orderValue - ),2) percentile_rank -FROM - test_percent_rank -ORDER BY - orderYear; - -// result -+------------------+-----------+------------+-----------------+ -| productLine | orderYear | orderValue | percentile_rank | -+------------------+-----------+------------+-----------------+ -| Motorcycles | 2003 | 2440.5 | 0 | -| Trains | 2003 | 2770.95 | 0.17 | -| Trucks and Buses | 2003 | 3284.28 | 0.33 | -| Vintage Cars | 2003 | 4080 | 0.5 
| -| Planes | 2003 | 4825.44 | 0.67 | -| Ships | 2003 | 5072.71 | 0.83 | -| Classic Cars | 2003 | 5571.8 | 1 | -| Motorcycles | 2004 | 2598.77 | 0 | -| Vintage Cars | 2004 | 2819.28 | 0.17 | -| Planes | 2004 | 2857.35 | 0.33 | -| Ships | 2004 | 4301.15 | 0.5 | -| Trucks and Buses | 2004 | 4615.64 | 0.67 | -| Trains | 2004 | 4646.88 | 0.83 | -| Classic Cars | 2004 | 8124.98 | 1 | -| Ships | 2005 | 1603.2 | 0 | -| Motorcycles | 2005 | 3774 | 0.2 | -| Planes | 2005 | 4018 | 0.4 | -| Vintage Cars | 2005 | 5346.5 | 0.6 | -| Classic Cars | 2005 | 5971.35 | 0.8 | -| Trucks and Buses | 2005 | 6295.03 | 1 | -+------------------+-----------+------------+-----------------+ -``` - -### keywords - - WINDOW,FUNCTION,PERCENT_RANK diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-rank.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-rank.md deleted file mode 100644 index 5d866de280e641..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-rank.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_RANK", - "language": "en" -} ---- - - - -## WINDOW FUNCTION RANK -### description - -The RANK() function is used to represent rankings. Unlike DENSE_RANK(), RANK() will have vacancies. For example, if there are two 1s in a row, the third number in RANK() is 3, not 2. 
- -```sql -RANK() OVER(partition_by_clause order_by_clause) -``` - -### example - -rank by x - -```sql -select x, y, rank() over(partition by x order by y) as rank from int_t; - -| x | y | rank | -|----|------|----------| -| 1 | 1 | 1 | -| 1 | 2 | 2 | -| 1 | 2 | 2 | -| 2 | 1 | 1 | -| 2 | 2 | 2 | -| 2 | 3 | 3 | -| 3 | 1 | 1 | -| 3 | 1 | 1 | -| 3 | 2 | 3 | -``` - -### keywords - - WINDOW,FUNCTION,RANK diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-row-number.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-row-number.md deleted file mode 100644 index f640d96c27bf89..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-row-number.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_ROW_NUMBER", - "language": "en" -} ---- - - - -## WINDOW FUNCTION ROW_NUMBER -### description - -Returns a continuously increasing integer starting from 1 for each row of each Partition. Unlike RANK() and DENSE_RANK(), the value returned by ROW_NUMBER() does not repeat or appear vacant, and is continuously incremented. 
- -```sql -ROW_NUMBER() OVER(partition_by_clause order_by_clause) -``` - -### example - -```sql -select x, y, row_number() over(partition by x order by y) as rank from int_t; - -| x | y | rank | -|---|------|----------| -| 1 | 1 | 1 | -| 1 | 2 | 2 | -| 1 | 2 | 3 | -| 2 | 1 | 1 | -| 2 | 2 | 2 | -| 2 | 3 | 3 | -| 3 | 1 | 1 | -| 3 | 1 | 2 | -| 3 | 2 | 3 | -``` - -### keywords - - WINDOW,FUNCTION,ROW_NUMBER diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-sum.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-sum.md deleted file mode 100644 index cd75600e3d96af..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-sum.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_SUM", - "language": "en" -} ---- - - - -## WINDOW FUNCTION SUM -### description - -Calculate the sum of the data in the window - -```sql -SUM([ALL] expression) [OVER (analytic_clause)] -``` - -### example - -Group by property, and calculate the sum of the x columns of the current row and the previous row within the group. 
- -```sql -select x, property, -sum(x) over -( -partition by property -order by x -rows between 1 preceding and 1 following -) as 'moving total' -from int_t where property in ('odd','even'); - -| x | property | moving total | -|----|----------|--------------| -| 2 | even | 6 | -| 4 | even | 12 | -| 6 | even | 18 | -| 8 | even | 24 | -| 10 | even | 18 | -| 1 | odd | 4 | -| 3 | odd | 9 | -| 5 | odd | 15 | -| 7 | odd | 21 | -| 9 | odd | 16 | -``` - -### keywords - - WINDOW,FUNCTION,SUM diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-window-funnel.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-window-funnel.md deleted file mode 100644 index ed58c41625065d..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function-window-funnel.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "WINDOW_FUNCTION_WINDOW_FUNNEL", - "language": "en" -} ---- - - - -## WINDOW FUNCTION WINDOW_FUNNEL -### description - -Searches the longest event chain happened in order (event1, event2, ... , eventN) along the timestamp_column with length of window. - -- window is the length of time window in seconds. -- mode can be one of the followings: - - "default": Defualt mode. - - "deduplication": If the same event holds for the sequence of events, then such repeating event interrupts further processing. E.g. the array parameter is [event1='A', event2='B', event3='C', event4='D'], and the original event chain is "A-B-C-B-D". Since event B repeats, the filtered event chain can only be "A-B-C" and the max event level is 3. - - "fixed": Don't allow interventions of other events. E.g. the array parameter is [event1='A', event2='B', event3='C', event4='D'], and the original event chain is A->B->D->C, it stops finding A->B->C at the D and the max event level is 2. - - "increase": Apply conditions only to events with strictly increasing timestamps. 
-- timestamp_column specifies column of DATETIME type, sliding time window works on it. -- evnetN is boolean expression like eventID = 1004. - -The function works according to the algorithm: - -- The function searches for data that triggers the first condition in the chain and sets the event counter to 1. This is the moment when the sliding window starts. -- If events from the chain occur sequentially within the window, the counter is incremented. If the sequence of events is disrupted, the counter is not incremented. -- If the data has multiple event chains at varying points of completion, the function will only output the size of the longest chain. - -```sql -window_funnel(window, mode, timestamp_column, event1, event2, ... , eventN) -``` - -### example - -```sql -CREATE TABLE windowfunnel_test ( - `xwho` varchar(50) NULL COMMENT 'xwho', - `xwhen` datetime COMMENT 'xwhen', - `xwhat` int NULL COMMENT 'xwhat' - ) -DUPLICATE KEY(xwho) -DISTRIBUTED BY HASH(xwho) BUCKETS 3 -PROPERTIES ( - "replication_num" = "1" -); - -INSERT into windowfunnel_test (xwho, xwhen, xwhat) values ('1', '2022-03-12 10:41:00', 1), - ('1', '2022-03-12 13:28:02', 2), - ('1', '2022-03-12 16:15:01', 3), - ('1', '2022-03-12 19:05:04', 4); - -select window_funnel(3600 * 3, 'default', t.xwhen, t.xwhat = 1, t.xwhat = 2 ) AS level from windowfunnel_test t; - -| level | -|---| -| 2 | -``` - -### keywords - - WINDOW,FUNCTION,WINDOW_FUNNEL diff --git a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function.md b/docs/en/docs/sql-manual/sql-functions/window-functions/window-function.md deleted file mode 100644 index 7a74fff74451b0..00000000000000 --- a/docs/en/docs/sql-manual/sql-functions/window-functions/window-function.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -{ - "title": "Window Functions Overview", - "language": "en" -} ---- - - - -## WINDOW FUNCTION -### description - -Analytical functions(windown function) are a special class of built-in functions. 
Similar to aggregate functions, analytic functions also perform calculations on multiple input rows to obtain a data value. The difference is that the analytic function processes the input data within a specific window, rather than grouping calculations by group by. The data within each window can be sorted and grouped using the over() clause. The analytic function computes a single value for each row of the result set, rather than one value per group by grouping. This flexible approach allows the user to add additional columns to the select clause, giving the user more opportunities to reorganize and filter the result set. Analytic functions can only appear in select lists and in the outermost order by clause. During the query process, the analytical function will take effect at the end, that is, after the join, where and group by operations are performed. Analytical functions are often used in financial and scientific computing to analyze trends, calculate outliers, and perform bucket analysis on large amounts of data. - -The syntax of the analytic function: - -```sql -function(args) OVER(partition_by_clause order_by_clause [window_clause]) -partition_by_clause ::= PARTITION BY expr [, expr ...] -order_by_clause ::= ORDER BY expr [ASC | DESC] [, expr [ASC | DESC] ...] -``` - -#### Function - -Support Functions: AVG(), COUNT(), DENSE_RANK(), FIRST_VALUE(), LAG(), LAST_VALUE(), LEAD(), MAX(), MIN(), RANK(), ROW_NUMBER(), SUM() - -#### PARTITION BY clause - -The Partition By clause is similar to Group By. It groups the input rows according to the specified column or columns, and rows with the same value will be grouped together. - -#### ORDER BY clause - -The Order By clause is basically the same as the outer Order By. It defines the order in which the input rows are sorted, and if Partition By is specified, Order By defines the order within each Partition grouping. 
The only difference from the outer Order By is that the Order By n (n is a positive integer) in the OVER clause is equivalent to doing nothing, while the outer Order By n means sorting according to the nth column. - -Example: - -This example shows adding an id column to the select list with values 1, 2, 3, etc., sorted by the date_and_time column in the events table. - -```sql -SELECT -row_number() OVER (ORDER BY date_and_time) AS id, -c1, c2, c3, c4 -FROM events; -``` - -#### Window clause - -The Window clause is used to specify an operation range for the analytical function, the current row is the criterion, and several rows before and after are used as the object of the analytical function operation. The methods supported by the Window clause are: AVG(), COUNT(), FIRST_VALUE(), LAST_VALUE() and SUM(). For MAX() and MIN(), the window clause can specify the starting range UNBOUNDED PRECEDING - -syntax: - -```sql -ROWS BETWEEN [ { m | UNBOUNDED } PRECEDING | CURRENT ROW] [ AND [CURRENT ROW | { UNBOUNDED | n } FOLLOWING] ] -``` - -### example - -Suppose we have the following stock data, the stock symbol is JDR, and the closing price is the closing price of each day. - -```sql -create table stock_ticker (stock_symbol string, closing_price decimal(8,2), closing_date timestamp); -...load some data... -select * from stock_ticker order by stock_symbol, closing_date - | stock_symbol | closing_price | closing_date | - |--------------|---------------|---------------------| - | JDR | 12.86 | 2014-10-02 00:00:00 | - | JDR | 12.89 | 2014-10-03 00:00:00 | - | JDR | 12.94 | 2014-10-04 00:00:00 | - | JDR | 12.55 | 2014-10-05 00:00:00 | - | JDR | 14.03 | 2014-10-06 00:00:00 | - | JDR | 14.75 | 2014-10-07 00:00:00 | - | JDR | 13.98 | 2014-10-08 00:00:00 | -``` - -This query uses the analytic function to generate the column moving_average, whose value is the 3-day average price of the stock, that is, the three-day average price of the previous day, the current day, and the next day. 
The first day has no value for the previous day, and the last day does not have the value for the next day, so these two lines only calculate the average of the two days. Partition By does not play a role here, because all the data are JDR data, but if there is other stock information, Partition By will ensure that the analysis function value acts within this Partition. - -```sql -select stock_symbol, closing_date, closing_price, -avg(closing_price) over (partition by stock_symbol order by closing_date -rows between 1 preceding and 1 following) as moving_average -from stock_ticker; - | stock_symbol | closing_date | closing_price | moving_average | - |--------------|---------------------|---------------|----------------| - | JDR | 2014-10-02 00:00:00 | 12.86 | 12.87 | - | JDR | 2014-10-03 00:00:00 | 12.89 | 12.89 | - | JDR | 2014-10-04 00:00:00 | 12.94 | 12.79 | - | JDR | 2014-10-05 00:00:00 | 12.55 | 13.17 | - | JDR | 2014-10-06 00:00:00 | 14.03 | 13.77 | - | JDR | 2014-10-07 00:00:00 | 14.75 | 14.25 | - | JDR | 2014-10-08 00:00:00 | 13.98 | 14.36 | -``` - -### keywords - - WINDOW,FUNCTION diff --git a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/ALTER-USER.md b/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/ALTER-USER.md deleted file mode 100644 index 7ba46f77af2b86..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/ALTER-USER.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -{ - "title": "ALTER-USER", - "language": "en" -} ---- - - - -## ALTER USER - -### Name - -ALTER USER - -### Description - -The ALTER USER command is used to modify a user's account attributes, including passwords, and password policies, etc. - ->Note that. -> ->This command give over supports modifying user roles from versions 2.0. 
Please use [GRANT](./GRANT.md) and [REVOKE](./REVOKE.md) for related operations - -```sql -ALTER USER [IF EXISTS] user_identity [IDENTIFIED BY 'password'] -[password_policy] - -user_identity: - 'user_name'@'host' - -password_policy: - - 1. PASSWORD_HISTORY [n|DEFAULT] - 2. PASSWORD_EXPIRE [DEFAULT|NEVER|INTERVAL n DAY/HOUR/SECOND] - 3. FAILED_LOGIN_ATTEMPTS n - 4. PASSWORD_LOCK_TIME [n DAY/HOUR/SECOND|UNBOUNDED] - 5. ACCOUNT_UNLOCK -``` - -About `user_identity` and `password_policy`, Please refer to `CREATE USER`. - -`ACCOUNT_UNLOCK` is used to unlock a locked user. - -In an ALTER USER command, only one of the following account attributes can be modified at the same time: - -1. Change password -2. Modify `PASSWORD_HISTORY` -3. Modify `PASSWORD_EXPIRE` -4. Modify `FAILED_LOGIN_ATTEMPTS` and `PASSWORD_LOCK_TIME` -5. Unlock users - -### Example - -1. Change the user's password - - ``` - ALTER USER jack@'%' IDENTIFIED BY "12345"; - ``` - -2. Modify the user's password policy - - ``` - ALTER USER jack@'%' FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 1 DAY; - ``` - -3. Unlock a user - - ``` - ALTER USER jack@'%' ACCOUNT_UNLOCK - ``` - -### Keywords - - ALTER, USER - -### Best Practice - -1. Modify the password policy - - 1. Modify `PASSWORD_EXPIRE` will reset the timing of password expiration time. - - 2. Modify `FAILED_LOGIN_ATTEMPTS` or `PASSWORD_LOCK_TIME` will unlock the user. 
\ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/CREATE-ROLE.md b/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/CREATE-ROLE.md deleted file mode 100644 index 1b3947a6cd1617..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/CREATE-ROLE.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "CREATE-ROLE", - "language": "en" -} ---- - - - -## CREATE-ROLE - -### Name - -CREATE ROLE - -### Description - -The statement user creates a role - -```sql - CREATE ROLE rol_name; -```` - -This statement creates an unprivileged role, which can be subsequently granted with the GRANT command. - -### Example - -1. Create a character - - ```sql - CREATE ROLE role1; - ```` - -### Keywords - - CREATE, ROLE - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/CREATE-USER.md b/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/CREATE-USER.md deleted file mode 100644 index aed37f74fefd36..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/CREATE-USER.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -{ - "title": "CREATE-USER", - "language": "en" -} ---- - - - -## CREATE-USER - -### Name - -CREATE USER - -### Description - -The CREATE USER command is used to create a Doris user. - -```sql -CREATE USER [IF EXISTS] user_identity [IDENTIFIED BY 'password'] -[DEFAULT ROLE 'role_name'] -[password_policy] - -user_identity: - 'user_name'@'host' - -password_policy: - - 1. PASSWORD_HISTORY [n|DEFAULT] - 2. PASSWORD_EXPIRE [DEFAULT|NEVER|INTERVAL n DAY/HOUR/SECOND] - 3. FAILED_LOGIN_ATTEMPTS n - 4. PASSWORD_LOCK_TIME [n DAY/HOUR/SECOND|UNBOUNDED] -``` - -In Doris, a user_identity uniquely identifies a user. user_identity consists of two parts, user_name and host, where username is the username. host Identifies the host address where the client connects. 
The host part can use % for fuzzy matching. If no host is specified, it defaults to '%', which means the user can connect to Doris from any host. - -The host part can also be specified as a domain, the syntax is: 'user_name'@['domain'], even if it is surrounded by square brackets, Doris will think this is a domain and try to resolve its ip address. . - -If a role (ROLE) is specified, the newly created user will be automatically granted the permissions of the role. If not specified, the user has no permissions by default. The specified ROLE must already exist. - -`password_policy` is a clause used to specify policies related to password authentication login. Currently, the following policies are supported: - -1. `PASSWORD_HISTORY` - - Whether to allow the current user to use historical passwords when resetting their passwords. For example, `PASSWORD_HISTORY 10` means that it is forbidden to use the password set in the past 10 times as a new password. If set to `PASSWORD_HISTORY DEFAULT`, the value in the global variable `password_history` will be used. `0` means do not enable this feature. Default is 0. - -2. `PASSWORD_EXPIRE` - - Set the expiration time of the current user's password. For example `PASSWORD_EXPIRE INTERVAL 10 DAY` means the password will expire in 10 days. `PASSWORD_EXPIRE NEVER` means that the password does not expire. If set to `PASSWORD_EXPIRE DEFAULT`, the value in the global variable `default_password_lifetime` is used. Defaults to NEVER (or 0), which means it will not expire. - -3. `FAILED_LOGIN_ATTEMPTS` and `PASSWORD_LOCK_TIME` - - When the current user logs in, if the user logs in with the wrong password for n times, the account will be locked, and the lock time is set. For example, `FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 1 DAY` means that if you log in wrongly for 3 times, the account will be locked for one day. - - A locked account can be actively unlocked through the `ALTER USER` statement. - -### Example - -1. 
Create a passwordless user (if host is not specified, it is equivalent to jack@'%') - - ```sql - CREATE USER 'jack'; - ``` - -2. Create a user with a password to allow login from '172.10.1.10' - - ```sql - CREATE USER jack@'172.10.1.10' IDENTIFIED BY '123456'; - ``` - -3. In order to avoid passing plaintext, use case 2 can also be created in the following way - - ```sql - CREATE USER jack@'172.10.1.10' IDENTIFIED BY PASSWORD '*6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9'; - The encrypted content can be obtained through PASSWORD(), for example: - SELECT PASSWORD('123456'); - ``` - -4. Create a user that is allowed to log in from the '192.168' subnet, and specify its role as example_role - - ```sql - CREATE USER 'jack'@'192.168.%' DEFAULT ROLE 'example_role'; - ``` - -5. Create a user that is allowed to log in from the domain 'example_domain' - - ```sql - CREATE USER 'jack'@['example_domain'] IDENTIFIED BY '12345'; - ``` - -6. Create a user and assign a role - - ```sql - CREATE USER 'jack'@'%' IDENTIFIED BY '12345' DEFAULT ROLE 'my_role'; - ``` - -7. Create a user, set the password to expire after 10 days, and set the account to be locked for one day if you log in failed for 3 times. - - ```sql - CREATE USER 'jack' IDENTIFIED BY '12345' PASSWORD_EXPIRE INTERVAL 10 DAY FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 1 DAY; - ``` - -8. Create a user and restrict non-resetable passwords to the last 8 passwords used. 
- - ```sql - CREATE USER 'jack' IDENTIFIED BY '12345' PASSWORD_HISTORY 8; - ``` - -### Keywords - - CREATE, USER - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/DROP-ROLE.md b/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/DROP-ROLE.md deleted file mode 100644 index 73ebe8d9cfcd2a..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/DROP-ROLE.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -{ - "title": "DROP-ROLE", - "language": "en" -} ---- - - - -## DROP-ROLE - -### Description - -The statement user removes a role - -```sql - DROP ROLE [IF EXISTS] role1; -```` - -Deleting a role does not affect the permissions of users who previously belonged to the role. It is only equivalent to decoupling the role from the user. The permissions that the user has obtained from the role will not change - -### Example - -1. Drop a role1 - -```sql -DROP ROLE role1; -```` - -### Keywords - - DROP, ROLE - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/DROP-USER.md b/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/DROP-USER.md deleted file mode 100644 index 5c4485632859a8..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/DROP-USER.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "DROP-USER", - "language": "en" -} ---- - - - -## DROP-USER - -### Name - -DROP USER - -### Description - -Delete a user - -```sql - DROP USER 'user_identity' - - `user_identity`: - - user@'host' - user@['domain'] -```` - - Delete the specified user identitiy. - -### Example - -1. 
Delete user jack@'192.%' - - ```sql - DROP USER 'jack'@'192.%' - ```` - -### Keywords - - DROP, USER - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/GRANT.md b/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/GRANT.md deleted file mode 100644 index da18856d269c13..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/GRANT.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -{ - "title": "GRANT", - "language": "en" -} ---- - - - -## GRANT - -### Name - -GRANT - -### Description - -The GRANT command has the following functions: - -1. Grant the specified permissions to a user or role. -2. Grant the specified role to a user. - ->Note that. -> ->"Grant specified roles to user" is supported in versions 2.0 and later - -```sql -GRANT privilege_list ON priv_level TO user_identity [ROLE role_name] - -GRANT privilege_list ON RESOURCE resource_name TO user_identity [ROLE role_name] - -GRANT role_list TO user_identity -```` - -GRANT privilege_list ON WORKLOAD GROUP workload_group_name TO user_identity [ROLE role_name] - -privilege_list is a list of privileges to be granted, separated by commas. Currently Doris supports the following permissions: - - NODE_PRIV: Cluster node operation permissions, including node online and offline operations. User who has NODE_PRIV and GRANT_PRIV permission, can grant NODE_PRIV to other users. - ADMIN_PRIV: All privileges except NODE_PRIV. - GRANT_PRIV: Privilege for operation privileges. Including creating and deleting users, roles, authorization and revocation, setting passwords, etc. 
- SELECT_PRIV: read permission on the specified database or table - LOAD_PRIV: Import privileges on the specified database or table - ALTER_PRIV: Schema change permission for the specified database or table - CREATE_PRIV: Create permission on the specified database or table - DROP_PRIV: drop privilege on the specified database or table - USAGE_PRIV: access to the specified resource - SHOW_VIEW_PRIV: View permission to `view` creation statements (starting from version 2.0.3, 'SELECT_PRIV' and 'LOAD_PRIV' permissions cannot be 'SHOW CREATE TABLE view_name', has one of `CREATE_PRIV`,`ALTER_PRIV`,`DROP_PRIV`,`SHOW_VIEW_PRIV` can `SHOW CREATE TABLE view_name`) - - ALL and READ_WRITE in legacy permissions will be converted to: SELECT_PRIV,LOAD_PRIV,ALTER_PRIV,CREATE_PRIV,DROP_PRIV; - READ_ONLY is converted to SELECT_PRIV. - -Permission classification: - - 1. Node Privilege: NODE_PRIV - 2. database table permissions: SELECT_PRIV, LOAD_PRIV, ALTER_PRIV, CREATE_PRIV, DROP_PRIV - 3. Resource and workload groups Privilege: USAGE_PRIV - -Priv_level supports the following four forms: - - 1. *.*.* permissions can be applied to all catalogs, all databases and all tables in them - 2. catalog_name.*.* permissions can be applied to all databases and all tables in them - 3. catalog_name.db.* permissions can be applied to all tables under the specified database - 4. catalog_name.db.tbl permission can be applied to the specified table under the specified database - - The catalog or database, table specified here may be not exist. - -resource_name supports the following two forms: - - 1. * Permissions apply to all resources - 2. The resource permission applies to the specified resource - - The resource specified here can be a non-existing resource. In addition, please distinguish the resources here from external tables, and use catalog as an alternative if you use external tables. 
- -workload_group_name specifies the workload group name and supports `%` and `_` match characters, `%` can match any string and `_` matches any single character. - -user_identity: - - The user_identity syntax here is the same as CREATE USER. And must be a user_identity created with CREATE USER. The host in user_identity can be a domain name. If it is a domain name, the effective time of the authority may be delayed by about 1 minute. - - You can also assign permissions to the specified ROLE, if the specified ROLE does not exist, it will be created automatically. - -role_list is the list of roles to be assigned, separated by commas,the specified role must exist. - -### Example - -1. Grant permissions to all catalog and databases and tables to the user - - ```sql - GRANT SELECT_PRIV ON *.*.* TO 'jack'@'%'; - ```` - -2. Grant permissions to the specified database table to the user - - ```sql - GRANT SELECT_PRIV,ALTER_PRIV,LOAD_PRIV ON ctl1.db1.tbl1 TO 'jack'@'192.8.%'; - ```` - -3. Grant permissions to the specified database table to the role - - ```sql - GRANT LOAD_PRIV ON ctl1.db1.* TO ROLE 'my_role'; - ```` - -4. Grant access to all resources to users - - ```sql - GRANT USAGE_PRIV ON RESOURCE * TO 'jack'@'%'; - ```` - -5. Grant the user permission to use the specified resource - - ```sql - GRANT USAGE_PRIV ON RESOURCE 'spark_resource' TO 'jack'@'%'; - ```` - -6. Grant access to specified resources to roles - - ```sql - GRANT USAGE_PRIV ON RESOURCE 'spark_resource' TO ROLE 'my_role'; - ```` - - - -7. Grant the specified role to a user - - ```sql - GRANT 'role1','role2' TO 'jack'@'%'; - ```` - - - -8. Grant the specified workload group 'g1' to user jack - - ```sql - GRANT USAGE_PRIV ON WORKLOAD GROUP 'g1' TO 'jack'@'%'. - ```` - -9. match all workload groups granted to user jack - - ```sql - GRANT USAGE_PRIV ON WORKLOAD GROUP '%' TO 'jack'@'%'. - ```` - -10. 
grant the workload group 'g1' to the role my_role - - ```sql - GRANT USAGE_PRIV ON WORKLOAD GROUP 'g1' TO ROLE 'my_role'. - ```` - -11. Allow jack to view the creation statement of view1 under db1 - - ```sql - GRANT SHOW_VIEW_PRIV ON db1.view1 TO 'jack'@'%'; - ```` - -### Keywords - - GRANT - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/LDAP.md b/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/LDAP.md deleted file mode 100644 index 3e0da6f0a22aba..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/LDAP.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "LDAP", - "language": "en" -} ---- - - - -## LDAP - -### Name - -LDAP - -### Description - -SET LDAP_ADMIN_PASSWORD - -```sql - SET LDAP_ADMIN_PASSWORD = PASSWORD('plain password') -```` - - The SET LDAP_ADMIN_PASSWORD command is used to set the LDAP administrator password. When using LDAP authentication, doris needs to use the administrator account and password to query the LDAP service for login user information. - -### Example - -1. Set the LDAP administrator password - -```sql -SET LDAP_ADMIN_PASSWORD = PASSWORD('123456') -```` - -### Keywords - - LDAP, PASSWORD, LDAP_ADMIN_PASSWORD - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/REVOKE.md b/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/REVOKE.md deleted file mode 100644 index 56b1cd145c0fe9..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/REVOKE.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -{ - "title": "REVOKE", - "language": "en" -} ---- - - - -## REVOKE - -### Name - -REVOKE - -### Description - -The REVOKE command has the following functions: - -1. Revoke the specified permission of a user or a role. -2. Revoke the specified role previously granted to a user. - ->Note that. 
-> ->"Revoke the specified roles previously granted to a user" is supported in versions 2.0 and later - -```sql -REVOKE privilege_list ON db_name[.tbl_name] FROM user_identity [ROLE role_name] - -REVOKE privilege_list ON RESOURCE resource_name FROM user_identity [ROLE role_name] - -REVOKE role_list FROM user_identity -```` - -user_identity: - -The user_identity syntax here is the same as CREATE USER. And must be a user_identity created with CREATE USER. The host in user_identity can be a domain name. If it is a domain name, the revocation time of permissions may be delayed by about 1 minute. - -It is also possible to revoke the permissions of the specified ROLE, the executed ROLE must exist. - -role_list is the list of roles to be revoked, separated by commas. The specified roles must exist. - -### Example - -1. Revoke the permission of user jack database testDb - - ```sql - REVOKE SELECT_PRIV ON db1.* FROM 'jack'@'192.%'; - ```` - -2. Revoke user jack resource spark_resource permission - - ```sql - REVOKE USAGE_PRIV ON RESOURCE 'spark_resource' FROM 'jack'@'192.%'; - ```` -3. Revoke the roles role1 and role2 previously granted to jack - - ```sql - REVOKE 'role1','role2' FROM 'jack'@'192.%'; - ``` - -### Keywords - - REVOKE - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/SET-PASSWORD.md b/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/SET-PASSWORD.md deleted file mode 100644 index 376c1f7755f94d..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/SET-PASSWORD.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -{ - "title": "SET-PASSWORD", - "language": "en" -} ---- - - - -## SET-PASSWORD - -### Name - -SET PASSWORD - -### Description - -The SET PASSWORD command can be used to modify a user's login password. 
If the [FOR user_identity] field does not exist, then change the current user's password - -```sql -SET PASSWORD [FOR user_identity] = - [PASSWORD('plain password')]|['hashed password'] -```` - -Note that the user_identity here must exactly match the user_identity specified when creating a user with CREATE USER, otherwise an error will be reported that the user does not exist. If user_identity is not specified, the current user is 'username'@'ip', which may not match any user_identity. Current users can be viewed through SHOW GRANTS. - -The plaintext password is input in the PASSWORD() method; when using a string directly, the encrypted password needs to be passed. -To modify the passwords of other users, administrator privileges are required. - -### Example - -1. Modify the current user's password - - ```sql - SET PASSWORD = PASSWORD('123456') - SET PASSWORD = '*6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9' - ```` - -2. Modify the specified user password - - ```sql - SET PASSWORD FOR 'jack'@'192.%' = PASSWORD('123456') - SET PASSWORD FOR 'jack'@['domain'] = '*6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9' - ```` - -### Keywords - - SET, PASSWORD - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/SET-PROPERTY.md b/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/SET-PROPERTY.md deleted file mode 100644 index aa16dbbed9f84b..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Account-Management-Statements/SET-PROPERTY.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -{ - "title": "SET-PROPERTY", - "language": "en" -} ---- - - - -## SET-PROPERTY - -### Name - -SET PROPERTY - -### Description - -Set user attributes, including resources assigned to users, importing clusters, etc. - -```sql -SET PROPERTY [FOR 'user'] 'key' = 'value' [, 'key' = 'value'] -```` - -The user attribute set here is for user, not user_identity. 
That is, if two users 'jack'@'%' and 'jack'@'192.%' are created through the CREATE USER statement, the SET PROPERTY statement can only be used for the user jack, not 'jack'@'% ' or 'jack'@'192.%' - -key: - -Super user privileges: - - max_user_connections: The maximum number of connections. - - max_query_instances: The number of instances that a user can use to execute a query at the same time. - - sql_block_rules: Set sql block rules. Once set, queries sent by this user will be rejected if they match the rules. - - cpu_resource_limit: Limit the cpu resources for queries. See the introduction to the session variable `cpu_resource_limit` for details. -1 means not set. - - exec_mem_limit: Limit the memory usage of the query. See the introduction to the session variable `exec_mem_limit` for details. -1 means not set. - - resource.cpu_share: CPU resource allocation. (obsolete) - - load_cluster.{cluster_name}.priority: Assign priority to the specified cluster, which can be HIGH or NORMAL - - resource_tags: Specifies the user's resource tag permissions. - - query_timeout: Specifies the user's query timeout permissions. - - Note: If the attributes `cpu_resource_limit`, `exec_mem_limit` are not set, the value in the session variable will be used by default. - -Ordinary user rights: - - quota.normal: resource allocation at the normal level. - - quota.high: High-level resource allocation. - - quota.low: resource allocation at low level. - - load_cluster.{cluster_name}.hadoop_palo_path: The hadoop directory used by palo, which needs to store the etl program and the intermediate data generated by etl for Doris to import. After the import is completed, the intermediate will be automatically cleaned up - -Data, etl program automatically retains the next use. - - load_cluster.{cluster_name}.hadoop_configs: The configuration of hadoop, where fs.default.name, mapred.job.tracker, hadoop.job.ugi must be filled in. 
- - load_cluster.{cluster_name}.hadoop_http_port: hadoop hdfs name node http port. Where hdfs defaults to 8070, afs defaults to 8010. - - default_load_cluster: The default import cluster. - -### Example - -1. Modify the maximum number of user jack connections to 1000 - - ```sql - SET PROPERTY FOR 'jack' 'max_user_connections' = '1000'; - ```` - -2. Modify the cpu_share of user jack to 1000 - - ```sql - SET PROPERTY FOR 'jack' 'resource.cpu_share' = '1000'; - ```` - -3. Modify the weight of the jack user's normal group - - ```sql - SET PROPERTY FOR 'jack' 'quota.normal' = '400'; - ```` - -4. Add import cluster for user jack - - ```sql - SET PROPERTY FOR 'jack' - 'load_cluster.{cluster_name}.hadoop_palo_path' = '/user/doris/doris_path', - 'load_cluster.{cluster_name}.hadoop_configs' = 'fs.default.name=hdfs://dpp.cluster.com:port;mapred.job.tracker=dpp.cluster.com:port;hadoop.job.ugi=user ,password;mapred.job.queue.name=job_queue_name_in_hadoop;mapred.job.priority=HIGH;'; - ```` - -5. Delete the imported cluster under user jack. - - ```sql - SET PROPERTY FOR 'jack' 'load_cluster.{cluster_name}' = ''; - ```` - -6. Modify the default import cluster of user jack - - ```sql - SET PROPERTY FOR 'jack' 'default_load_cluster' = '{cluster_name}'; - ```` - -7. Change the cluster priority of user jack to HIGH - - ```sql - SET PROPERTY FOR 'jack' 'load_cluster.{cluster_name}.priority' = 'HIGH'; - ```` - -8. Modify the number of available instances for user jack's query to 3000 - - ```sql - SET PROPERTY FOR 'jack' 'max_query_instances' = '3000'; - ```` - -9. Modify the sql block rule of user jack - - ```sql - SET PROPERTY FOR 'jack' 'sql_block_rules' = 'rule1, rule2'; - ```` - -10. Modify the cpu usage limit of user jack - - ```sql - SET PROPERTY FOR 'jack' 'cpu_resource_limit' = '2'; - ```` - -11. Modify the user's resource tag permissions - - ```sql - SET PROPERTY FOR 'jack' 'resource_tags.location' = 'group_a, group_b'; - ```` - -12. 
Modify the user's query memory usage limit, in bytes - - ```sql - SET PROPERTY FOR 'jack' 'exec_mem_limit' = '2147483648'; - ```` - -13. Modify the user's query timeout limit, in second - - ```sql - SET PROPERTY FOR 'jack' 'query_timeout' = '500'; - ```` - -### Keywords - - SET, PROPERTY - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-BACKEND.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-BACKEND.md deleted file mode 100644 index 7006ac761abb22..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-BACKEND.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -{ - "title": "ALTER-SYSTEM-ADD-BACKEND", - "language": "en" -} ---- - - - -## ALTER-SYSTEM-ADD-BACKEND - -### Name - -ALTER SYSTEM ADD BACKEND - -### Description - -This statement is used to manipulate nodes within a system. (Administrator only!) - -grammar: - -```sql --- Add nodes (add this method if you do not use the multi-tenancy function) - ALTER SYSTEM ADD BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; -```` - - illustrate: - -1. host can be a hostname or an ip address -2. heartbeat_port is the heartbeat port of the node -3. Adding and deleting nodes is a synchronous operation. These two operations do not consider the existing data on the node, and the node is directly deleted from the metadata, please use it with caution. - -### Example - - 1. 
Add a node - - ```sql - ALTER SYSTEM ADD BACKEND "host:port"; - ```` - -### Keywords - - ALTER, SYSTEM, ADD, BACKEND, ALTER SYSTEM - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-BROKER.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-BROKER.md deleted file mode 100644 index b1d01552c0e913..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-BROKER.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -{ - "title": "ALTER-SYSTEM-ADD-BROKER", - "language": "en" -} - ---- - - - -## ALTER-SYSTEM-ADD-BROKER - -### Name - -ALTER SYSTEM ADD BROKER - -### Description - -This statement is used to add a BROKER node. (Administrator only!) - -grammar: - -```sql -ALTER SYSTEM ADD BROKER broker_name "broker_host1:broker_ipc_port1","broker_host2:broker_ipc_port2",...; -```` - -### Example - -1. Add two brokers - - ```sql - ALTER SYSTEM ADD BROKER "host1:port", "host2:port"; - ```` -2. When fe enable fqdn([fqdn](../../../admin-manual/cluster-management/fqdn.md)),add one Broker - - ```sql - ALTER SYSTEM ADD BROKER "broker_fqdn1:port"; - ``` - -### Keywords - - ALTER, SYSTEM, ADD, FOLLOWER, ALTER SYSTEM - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-FOLLOWER.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-FOLLOWER.md deleted file mode 100644 index 4ddad69465acd7..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-FOLLOWER.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "ALTER-SYSTEM-ADD-FOLLOWER", - "language": "en" -} ---- - - - -## ALTER-SYSTEM-ADD-FOLLOWER - -### Name - -ALTER SYSTEM ADD FOLLOWER - -### Description - -This statement is to increase the node of the FOLLOWER role of FRONTEND, (only for administrators!) 
- -grammar: - -```sql -ALTER SYSTEM ADD FOLLOWER "follower_host:edit_log_port" -```` - -illustrate: - -1. host can be a hostname or an ip address -2. edit_log_port : edit_log_port in its configuration file fe.conf - -### Example - -1. Add a FOLLOWER node - - ```sql - ALTER SYSTEM ADD FOLLOWER "host_ip:9010" - ```` - -### Keywords - - ALTER, SYSTEM, ADD, FOLLOWER, ALTER SYSTEM - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-OBSERVER.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-OBSERVER.md deleted file mode 100644 index ecb252277b4ec7..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-ADD-OBSERVER.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "ALTER-SYSTEM-ADD-OBSERVER", - "language": "en" -} ---- - - - -## ALTER-SYSTEM-ADD-OBSERVER - -### Name - -ALTER SYSTEM ADD OBSERVER - -### Description - -This statement is to increase the node of the OBSERVER role of FRONTEND, (only for administrators!) - -grammar: - -```sql -ALTER SYSTEM ADD OBSERVER "follower_host:edit_log_port" -```` - -illustrate: - -1. host can be a hostname or an ip address -2. edit_log_port : edit_log_port in its configuration file fe.conf - -### Example - -1. 
Add an OBSERVER node - - ```sql - ALTER SYSTEM ADD OBSERVER "host_ip:9010" - ```` - -### Keywords - - ALTER, SYSTEM, ADD, OBSERVER, ALTER SYSTEM - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DECOMMISSION-BACKEND.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DECOMMISSION-BACKEND.md deleted file mode 100644 index 6ea943500d570d..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DECOMMISSION-BACKEND.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -{ - "title": "ALTER-SYSTEM-DECOMMISSION-BACKEND", - "language": "en" -} ---- - - - -## ALTER-SYSTEM-DECOMMISSION-BACKEND - -### Name - -ALTER SYSTEM DECOMMISSION BACKEND - -### Description - -The node offline operation is used to safely log off the node. The operation is asynchronous. If successful, the node is eventually removed from the metadata. If it fails, the logout will not be done (only for admins!) - -grammar: - -- Find backend through host and port - -```sql -ALTER SYSTEM DECOMMISSION BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; -```` - -- Find backend through backend_id - -```sql -ALTER SYSTEM DECOMMISSION BACKEND "id1","id2"...; -```` - - illustrate: - -1. host can be a hostname or an ip address -2. heartbeat_port is the heartbeat port of the node -3. The node offline operation is used to safely log off the node. The operation is asynchronous. If successful, the node is eventually removed from the metadata. If it fails, the logout will not be completed. -4. You can manually cancel the node offline operation. See CANCEL DECOMMISSION - -### Example - -1. 
Offline two nodes - - ```sql - ALTER SYSTEM DECOMMISSION BACKEND "host1:port", "host2:port"; - ```` - - ```sql - ALTER SYSTEM DECOMMISSION BACKEND "id1", "id2"; - ```` - -### Keywords - - ALTER, SYSTEM, DECOMMISSION, BACKEND, ALTER SYSTEM - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-BACKEND.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-BACKEND.md deleted file mode 100644 index 2882654cc5969a..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-BACKEND.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -{ - "title": "ALTER-SYSTEM-DROP-BACKEND", - "language": "en" -} ---- - - - -## ALTER-SYSTEM-DROP-BACKEND - -### Name - -ALTER SYSTEM DROP BACKEND - -### Description - -This statement is used to delete the BACKEND node (administrator only!) - -grammar: - -- Find backend through host and port - -```sql -ALTER SYSTEM DROP BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...] -```` -- Find backend through backend_id - -```sql -ALTER SYSTEM DROP BACKEND "id1","id2"...; -```` - -illustrate: - -1. host can be a hostname or an ip address -2. heartbeat_port is the heartbeat port of the node -3. Adding and deleting nodes is a synchronous operation. These two operations do not consider the existing data on the node, and the node is directly deleted from the metadata, please use it with caution. - -### Example - -1. 
Delete two nodes - - ```sql - ALTER SYSTEM DROP BACKEND "host1:port", "host2:port"; - ```` - - ```sql - ALTER SYSTEM DROP BACKEND "ids1", "ids2"; - ```` - -### Keywords - - ALTER, SYSTEM, DROP, BACKEND, ALTER SYSTEM - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-BROKER.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-BROKER.md deleted file mode 100644 index af774f751dd261..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-BROKER.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "ALTER-SYSTEM-DROP-BROKER", - "language": "en" -} - ---- - - - -## ALTER-SYSTEM-DROP-BROKER - -### Name - -ALTER SYSTEM DROP BROKER - -### Description - -This statement is to delete the BROKER node, (administrator only) - -grammar: - -```sql --- Delete all brokers -ALTER SYSTEM DROP ALL BROKER broker_name --- Delete a Broker node -ALTER SYSTEM DROP BROKER broker_name "host:port"[,"host:port"...]; -```` - -### Example - -1. Delete all brokers - - ```sql - ALTER SYSTEM DROP ALL BROKER broker_name - ```` - -2. 
Delete a Broker node - - ```sql - ALTER SYSTEM DROP BROKER broker_name "host:port"[,"host:port"...]; - ```` - -### Keywords - - ALTER, SYSTEM, DROP, FOLLOWER, ALTER SYSTEM - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-FOLLOWER.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-FOLLOWER.md deleted file mode 100644 index 664d43a750ec73..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-FOLLOWER.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "ALTER-SYSTEM-DROP-FOLLOWER", - "language": "en" -} ---- - - - -## ALTER-SYSTEM-DROP-FOLLOWER - -### Name - -ALTER SYSTEM DROP FOLLOWER - -### Description - -This statement is to delete the node of the FOLLOWER role of FRONTEND, (only for administrators!) - -grammar: - -```sql -ALTER SYSTEM DROP FOLLOWER "follower_host:edit_log_port" -```` - -illustrate: - -1. host can be a hostname or an ip address -2. edit_log_port : edit_log_port in its configuration file fe.conf - -### Example - -1. Add a FOLLOWER node - - ```sql - ALTER SYSTEM DROP FOLLOWER "host_ip:9010" - ```` - -### Keywords - - ALTER, SYSTEM, DROP, FOLLOWER, ALTER SYSTEM - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-OBSERVER.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-OBSERVER.md deleted file mode 100644 index 98166d9779b1d8..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-DROP-OBSERVER.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "ALTER-SYSTEM-DROP-OBSERVER", - "language": "en" -} ---- - - - -## ALTER-SYSTEM-DROP-OBSERVER - -### Name - -ALTER SYSTEM DROP OBSERVER - -### Description - -This statement is to delete the node of the OBSERVER role of FRONTEND, (only for administrators!) 
- -grammar: - -```sql -ALTER SYSTEM DROP OBSERVER "follower_host:edit_log_port" -```` - -illustrate: - -1. host can be a hostname or an ip address -2. edit_log_port : edit_log_port in its configuration file fe.conf - -### Example - -1. Add a FOLLOWER node - - ```sql - ALTER SYSTEM DROP OBSERVER "host_ip:9010" - ```` - -### Keywords - - ALTER, SYSTEM, DROP, OBSERVER, ALTER SYSTEM - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-MODIFY-BACKEND.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-MODIFY-BACKEND.md deleted file mode 100644 index d4f13ccfdb6217..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/ALTER-SYSTEM-MODIFY-BACKEND.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -{ - "title": "ALTER-SYSTEM-MODIFY-BACKEND", - "language": "en" -} - ---- - - - -## ALTER-SYSTEM-MODIFY-BACKEND - -### Name - -ALTER SYSTEM MKDIFY BACKEND - -### Description - -Modify BE node properties (administrator only!) - -grammar: - -- Find backend through host and port - -```sql -ALTER SYSTEM MODIFY BACKEND "host:heartbeat_port" SET ("key" = "value"[, ...]); -```` - -- Find backend through backend_id - -```sql -ALTER SYSTEM MODIFY BACKEND "id1" SET ("key" = "value"[, ...]); -```` - - illustrate: - -1. host can be a hostname or an ip address -2. heartbeat_port is the heartbeat port of the node -3. Modify BE node properties The following properties are currently supported: - -- tag.xxxx: resource tag -- disable_query: query disable attribute -- disable_load: import disable attribute - -Note: -1. A backend can be set multi resource tags. But must contain "tag.location" type. - -### Example - -1. 
Modify the resource tag of BE - - ```sql - ALTER SYSTEM MODIFY BACKEND "host1:heartbeat_port" SET ("tag.location" = "group_a"); - ALTER SYSTEM MODIFY BACKEND "host1:heartbeat_port" SET ("tag.location" = "group_a", "tag.compute" = "c1"); - ```` - - ```sql - ALTER SYSTEM MODIFY BACKEND "id1" SET ("tag.location" = "group_a"); - ALTER SYSTEM MODIFY BACKEND "id1" SET ("tag.location" = "group_a", "tag.compute" = "c1"); - ```` - -2. Modify the query disable property of BE - - ```sql - ALTER SYSTEM MODIFY BACKEND "host1:heartbeat_port" SET ("disable_query" = "true"); - ```` - - ```sql - ALTER SYSTEM MODIFY BACKEND "id1" SET ("disable_query" = "true"); - ```` - -3. Modify the import disable property of BE - - ```sql - ALTER SYSTEM MODIFY BACKEND "host1:heartbeat_port" SET ("disable_load" = "true"); - ```` - - ```sql - ALTER SYSTEM MODIFY BACKEND "id1" SET ("disable_load" = "true"); - ```` - -### Keywords - - ALTER, SYSTEM, ADD, BACKEND, ALTER SYSTEM - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/CANCEL-ALTER-SYSTEM.md b/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/CANCEL-ALTER-SYSTEM.md deleted file mode 100644 index 24ab0c71b54c20..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Cluster-Management-Statements/CANCEL-ALTER-SYSTEM.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -{ - "title": "CANCEL-ALTER-SYSTEM", - "language": "en" -} ---- - - - -## CANCEL-ALTER-SYSTEM - -### Name - -CANCEL DECOMMISSION - -### Description - -This statement is used to undo a node offline operation. (Administrator only!) - -grammar: - -- Find backend through host and port - -```sql -CANCEL DECOMMISSION BACKEND "host:heartbeat_port"[,"host:heartbeat_port"...]; -```` - -- Find backend through backend_id - -```sql -CANCEL DECOMMISSION BACKEND "id1","id2","id3..."; -```` - -### Example - - 1. Cancel the offline operation of both nodes: - - ```sql - CANCEL DECOMMISSION BACKEND "host1:port", "host2:port"; - ```` - - 2. 
Cancel the offline operation of the node with backend_id 1: - - ```sql - CANCEL DECOMMISSION BACKEND "1","2"; - ``` - -### Keywords - - CANCEL, DECOMMISSION, CANCEL ALTER - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-ASYNC-MATERIALIZED-VIEW.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-ASYNC-MATERIALIZED-VIEW.md deleted file mode 100644 index 11f24aed95fa2a..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-ASYNC-MATERIALIZED-VIEW.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -{ - "title": "ALTER-ASYNC-MATERIALIZED-VIEW", - "language": "en" -} ---- - - - -## ALTER-ASYNC-MATERIALIZED-VIEW - -### Name - -ALTER ASYNC MATERIALIZED VIEW - -### Description - -This statement is used to modify asynchronous materialized views. - -#### syntax - -```sql -ALTER MATERIALIZED VIEW mvName=multipartIdentifier ((RENAME newName=identifier) - | (REFRESH (refreshMethod | refreshTrigger | refreshMethod refreshTrigger)) - | (SET LEFT_PAREN fileProperties=propertyItemList RIGHT_PAREN)) -``` - -#### illustrate - -##### RENAME - -Used to change the name of the materialized view - -For example, changing the name of mv1 to mv2 -```sql -ALTER MATERIALIZED VIEW mv1 rename mv2; -``` - -##### refreshMethod - -Same as [creating asynchronous materialized views](../Create/CREATE-ASYNC-MATERIALIZED-VIEW.md) - -##### refreshTrigger - -Same as [creating asynchronous materialized views](../Create/CREATE-ASYNC-MATERIALIZED-VIEW.md) - -##### SET -Modify properties unique to materialized views - -For example, modifying the grace_period of mv1 to 3000ms -```sql -ALTER MATERIALIZED VIEW mv1 set("grace_period"="3000"); -``` - -### Keywords - - ALTER, ASYNC, MATERIALIZED, VIEW - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-CATALOG.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-CATALOG.md 
deleted file mode 100644 index b20b33663633d8..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-CATALOG.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -{ - "title": "ALTER-CATALOG", - "language": "en" -} ---- - - - -## ALTER-CATALOG - -### Name - - - -ALTER CATALOG - - - -### Description - -This statement is used to set properties of the specified catalog. (administrator only) - -1) Rename the catalog - -```sql -ALTER CATALOG catalog_name RENAME new_catalog_name; -``` - -illustrate: -- The builtin catalog `internal` cannot be renamed -- Only the one who has at least Alter privilege can rename a catalog -- After renaming the catalog, use the REVOKE and GRANT commands to modify the appropriate user permissions - -2) Modify / add properties for the catalog - -```sql -ALTER CATALOG catalog_name SET PROPERTIES ('key1' = 'value1' [, 'key' = 'value2']); -``` - -Update values of specified keys. If a key does not exist in the catalog properties, it will be added. - -illustrate: -- property `type` cannot be modified. -- properties of builtin catalog `internal` cannot be modified. - -3) Modify comment for the catalog - -```sql -ALTER CATALOG catalog_name MODIFY COMMENT "new catalog comment"; -``` - -illustrate: -- The builtin catalog `internal` cannot be modified - -### Example - -1. rename catalog ctlg_hive to hive - -```sql -ALTER CATALOG ctlg_hive RENAME hive; -``` - -3. modify property `hive.metastore.uris` of catalog hive - -```sql -ALTER CATALOG hive SET PROPERTIES ('hive.metastore.uris'='thrift://172.21.0.1:9083'); -``` - -4. 
modify comment of catalog hive - -```sql -ALTER CATALOG hive MODIFY COMMENT "new catalog comment"; -``` - -### Keywords - -ALTER,CATALOG,RENAME,PROPERTY - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md deleted file mode 100644 index 54c87c05e67e70..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-COLOCATE-GROUP.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -{ -"title": "ALTER-COLOCATE-GROUP", -"language": "en" -} ---- - - - -## ALTER-COLOCATE-GROUP - -### Name - -ALTER COLOCATE GROUP - - - -### Description - -This statement is used to modify the colocation group. - -Syntax: - -```sql -ALTER COLOCATE GROUP [database.]group -SET ( - property_list -); -``` - -NOTE: - -1. If the colocate group is global, that is, its name starts with `__global__`, then it does not belong to any database; - -2. property_list is a colocation group attribute, currently only supports modifying `replication_num` and `replication_allocation`. After modifying these two attributes of the colocation group, at the same time, change the attribute `default.replication_allocation`, the attribute `dynamic.replication_allocation` of the table of the group, and the `replication_allocation` of the existing partition to be the same as it. - -### Example - -1. Modify the number of copies of a global group - - ```sql - # Set "colocate_with" = "__global__foo" when creating the table - - ALTER COLOCATE GROUP __global__foo - SET ( - "replication_num"="1" - ); - ``` - -2. 
Modify the number of copies of a non-global group - - ```sql - # Set "colocate_with" = "bar" when creating the table, and the Database is "example_db" - - ALTER COLOCATE GROUP example_db.bar - SET ( - "replication_num"="1" - ); - ``` - -### Keywords - -```sql -ALTER, COLOCATE, GROUP -``` - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-DATABASE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-DATABASE.md deleted file mode 100644 index f1b361e3edf754..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-DATABASE.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -{ - "title": "ALTER-DATABASE", - "language": "en" -} ---- - - - -## ALTER-DATABASE - -### Name - -ALTER DATABASE - -### Description - -This statement is used to set properties of the specified database. (administrator only) - -1) Set the database data quota, the unit is B/K/KB/M/MB/G/GB/T/TB/P/PB - -```sql -ALTER DATABASE db_name SET DATA QUOTA quota; -``` - -2) Rename the database - -```sql -ALTER DATABASE db_name RENAME new_db_name; -``` - -3) Set the quota for the number of copies of the database - -```sql -ALTER DATABASE db_name SET REPLICA QUOTA quota; -``` - -illustrate: - After renaming the database, use the REVOKE and GRANT commands to modify the appropriate user permissions, if necessary. - The default data quota for the database is 1024GB, and the default replica quota is 1073741824. - -4) Modify the properties of an existing database - -```sql -ALTER DATABASE db_name SET PROPERTIES ("key"="value", ...); -``` - -### Example - -1. Set the specified database data volume quota - -```sql -ALTER DATABASE example_db SET DATA QUOTA 10995116277760; -The above unit is bytes, which is equivalent to -ALTER DATABASE example_db SET DATA QUOTA 10T; - -ALTER DATABASE example_db SET DATA QUOTA 100G; - -ALTER DATABASE example_db SET DATA QUOTA 200M; -``` - -2. 
Rename the database example_db to example_db2 - -```sql -ALTER DATABASE example_db RENAME example_db2; -``` - -3. Set the quota for the number of copies of the specified database - -```sql -ALTER DATABASE example_db SET REPLICA QUOTA 102400; -``` - -4. Modify the default replica distribution policy for tables in db (this operation only applies to newly created tables and will not modify existing tables in db) - -```sql -ALTER DATABASE example_db SET PROPERTIES("replication_allocation" = "tag.location.default:2"); -``` - -5. Cancel the default replica distribution policy for tables in db (this operation only applies to newly created tables and will not modify existing tables in db) - -```sql -ALTER DATABASE example_db SET PROPERTIES("replication_allocation" = ""); -``` - -### Keywords - -```text -ALTER,DATABASE,RENAME -``` - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-RESOURCE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-RESOURCE.md deleted file mode 100644 index 2e26fc3d2eb76a..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-RESOURCE.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -{ -"title": "ALTER-RESOURCE", -"language": "en" -} ---- - - - -## ALTER-RESOURCE - - - -### Name - -ALTER RESOURCE - -### Description - -This statement is used to modify an existing resource. Only the root or admin user can modify resources. -Syntax: -```sql -ALTER RESOURCE 'resource_name' -PROPERTIES ("key"="value", ...); -``` - -Note: The resource type does not support modification. - -### Example - -1. Modify the working directory of the Spark resource named spark0: - -```sql -ALTER RESOURCE 'spark0' PROPERTIES ("working_dir" = "hdfs://127.0.0.1:10000/tmp/doris_new"); -``` -2. 
Modify the maximum number of connections to the S3 resource named remote_s3: - -```sql -ALTER RESOURCE 'remote_s3' PROPERTIES ("s3.connection.maximum" = "100"); -``` - -3. Modify information related to cold and hot separation S3 resources -- Support - - `s3.access_key` s3 ak - - `s3.secret_key` s3 sk - - `s3.session_token` s3 token - - `s3.connection.maximum` default 50 - - `s3.connection.timeout` default 1000ms - - `s3.connection.request.timeout` default 3000ms -- Not Support - - `s3.region` - - `s3.bucket"` - - `s3.root.path` - - `s3.endpoint` - -```sql - ALTER RESOURCE "showPolicy_1_resource" PROPERTIES("s3.connection.maximum" = "1111"); -``` -### Keywords - -```text -ALTER, RESOURCE -``` - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-SQL-BLOCK-RULE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-SQL-BLOCK-RULE.md deleted file mode 100644 index 9b2412dd05a1ad..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-SQL-BLOCK-RULE.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -{ - "title": "ALTER-SQL-BLOCK-RULE", - "language": "en" -} ---- - - - -## ALTER-SQL-BLOCK-RULE - -### Name - -ALTER SQL BLOCK RULE - -### Description - -Modify SQL blocking rules to allow modification of each item such as sql/sqlHash/partition_num/tablet_num/cardinality/global/enable. - -grammar: - -```sql -ALTER SQL_BLOCK_RULE rule_name -[PROPERTIES ("key"="value", ...)]; -```` - -illustrate: - -- sql and sqlHash cannot be set at the same time. This means that if a rule sets sql or sqlHash, the other attribute cannot be modified; -- sql/sqlHash and partition_num/tablet_num/cardinality cannot be set at the same time. For example, if a rule sets partition_num, then sql or sqlHash cannot be modified; - -### Example - -1. 
Modify according to SQL properties - -```sql -ALTER SQL_BLOCK_RULE test_rule PROPERTIES("sql"="select \\* from test_table","enable"="true") -```` - -2. If a rule sets partition_num, then sql or sqlHash cannot be modified - -```sql -ALTER SQL_BLOCK_RULE test_rule2 PROPERTIES("partition_num" = "10","tablet_num"="300","enable"="true") -```` - -### Keywords - -````text -ALTER,SQL_BLOCK_RULE -```` - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-STORAGE-POLICY.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-STORAGE-POLICY.md deleted file mode 100644 index 30b44285c050e5..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-STORAGE-POLICY.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ -"title": "ALTER-POLICY", -"language": "en" -} ---- - - - -## ALTER-POLICY - -### Name - -ALTER STORAGE POLICY - -### Description - -This statement is used to modify an existing cold and hot separation migration strategy. Only root or admin users can modify resources. - -```sql -ALTER STORAGE POLICY 'policy_name' -PROPERTIES ("key"="value", ...); -``` - -### Example - -1. Modify the name to coolown_datetime Cold and hot separation data migration time point: -```sql -ALTER STORAGE POLICY has_test_policy_to_alter PROPERTIES("cooldown_datetime" = "2023-06-08 00:00:00"); -``` -2. 
Modify the name to coolown_countdown of hot and cold separation data migration of ttl -```sql -ALTER STORAGE POLICY has_test_policy_to_alter PROPERTIES ("cooldown_ttl" = "10000"); -ALTER STORAGE POLICY has_test_policy_to_alter PROPERTIES ("cooldown_ttl" = "1h"); -ALTER STORAGE POLICY has_test_policy_to_alter PROPERTIES ("cooldown_ttl" = "3d"); -``` -### Keywords - -```sql -ALTER, STORAGE, POLICY -``` - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-COLUMN.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-COLUMN.md deleted file mode 100644 index 7897540a60d573..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-COLUMN.md +++ /dev/null @@ -1,309 +0,0 @@ ---- -{ - "title": "ALTER-TABLE-COLUMN", - "language": "en" -} ---- - - - -## Name - -ALTER TABLE COLUMN - -### Description - -This statement is used to perform a schema change operation on an existing table. The schema change is asynchronous, and the task is returned when the task is submitted successfully. After that, you can use the [SHOW ALTER TABLE COLUMN](../../Show-Statements/SHOW-ALTER.md) command to view the progress. - -Doris has the concept of materialized index after table construction. After successful table construction, it is the base table and the materialized index is the base index. rollup index can be created based on the base table. Both base index and rollup index are materialized indexes. If rollup_index_name is not specified during the schema change operation, the operation is based on the base table by default. - -:::tip -Doris 1.2.0 supports light schema change for light scale structure changes, and addition and subtraction operations for value columns can be completed more quickly and synchronously. You can manually specify "light_schema_change" = 'true' when creating a table. 
This parameter is enabled by default for versions 2.0.0 and later. -::: - -### Grammar: - -```sql -ALTER TABLE [database.]table alter_clause; -``` - -The alter_clause of schema change supports the following modification methods: - -**1. Add a column to the specified position at the specified index** - -**Grammar** - -```sql -ALTER TABLE [database.]table table_name ADD COLUMN column_name column_type [KEY | agg_type] [DEFAULT "default_value"] -[AFTER column_name|FIRST] -[TO rollup_index_name] -[PROPERTIES ("key"="value", ...)] -``` - -**Example** - -1. Add a key column new_col to example_db.my_table after key_1 (non-aggregated model) - - ```sql - ALTER TABLE example_db.my_table - ADD COLUMN new_col INT KEY DEFAULT "0" AFTER key_1; - ``` - -2. Add a value column new_col to example_db.my_table after value_1 (non-aggregate model) - - ```sql - ALTER TABLE example_db.my_table - ADD COLUMN new_col INT DEFAULT "0" AFTER value_1; - ``` - -3. Add a key column new_col (aggregate model) to example_db.my_table after key_1 - - ```sql - ALTER TABLE example_db.my_table - ADD COLUMN new_col INT KEY DEFAULT "0" AFTER key_1; - ``` - -4. Add a value column to example_db.my_table after value_1 new_col SUM Aggregation type (aggregation model) - - ```sql - ALTER TABLE example_db.my_table - ADD COLUMN new_col INT SUM DEFAULT "0" AFTER value_1; - ``` - -5. Add new_col to the first column position of the example_db.my_table table (non-aggregated model) - - ```sql - ALTER TABLE example_db.my_table - ADD COLUMN new_col INT KEY DEFAULT "0" FIRST; - ``` - -:::tip -- If you add a value column to the aggregation model, you need to specify agg_type -- For non-aggregated models (such as DUPLICATE KEY), if you add a key column, you need to specify the KEY keyword -- You cannot add columns that already exist in the base index to the rollup index (you can recreate a rollup index if necessary) -::: - - -**2. 
Add multiple columns to the specified index** - -**Grammar** - -```sql -ALTER TABLE [database.]table table_name ADD COLUMN (column_name1 column_type [KEY | agg_type] DEFAULT "default_value", ...) -[TO rollup_index_name] -[PROPERTIES ("key"="value", ...)] -``` - - -**Example** - -1. Add multiple columns to example_db.my_table, where new_col and new_col2 are SUM aggregate types (aggregate model) - - ```sql - ALTER TABLE example_db.my_table - ADD COLUMN (new_col1 INT SUM DEFAULT "0" ,new_col2 INT SUM DEFAULT "0"); - ``` - -2. Add multiple columns to example_db.my_table (non-aggregated model), where new_col1 is the KEY column and new_col2 is the value column - - ```sql - ALTER TABLE example_db.my_table - ADD COLUMN (new_col1 INT key DEFAULT "0" , new_col2 INT DEFAULT "0"); - ``` - -:::tip - - If you add a value column to the aggregation model, you need to specify agg_type - - If you add a key column to the aggregation model, you need to specify the KEY keyword - - You cannot add columns that already exist in the base index to the rollup index (you can recreate a rollup index if necessary) -::: - -**3. Delete a column from the specified index** - -**Grammar*** - - ```sql - ALTER TABLE [database.]table table_name DROP COLUMN column_name - [FROM rollup_index_name] - ``` - -**Example** - -1. Delete column col1 from example_db.my_table - - ```sql - ALTER TABLE example_db.my_table DROP COLUMN col1; - ``` - -:::tip - - Cannot drop partition column - - The aggregate model cannot delete KEY columns - - If the column is removed from the base index, it will also be removed if it is included in the rollup index -::: - -**4. Modify the column type and column position of the specified index** - -**Grammar** - -```sql -ALTER TABLE [database.]table table_name MODIFY COLUMN column_name column_type [KEY | agg_type] [NULL | NOT NULL] [DEFAULT "default_value"] -[AFTER column_name|FIRST] -[FROM rollup_index_name] -[PROPERTIES ("key"="value", ...)] -``` - -**Example** - -1. 
Modify the type of the key column col1 of the base index to BIGINT and move it to the back of the col2 column - - ```sql - ALTER TABLE example_db.my_table - MODIFY COLUMN col1 BIGINT KEY DEFAULT "1" AFTER col2; - ``` - - :::tip - Whether you modify the key column or the value column, you need to declare complete column information - ::: - -2. Modify the maximum length of the val1 column of base index. The original val1 is (val1 VARCHAR(32) REPLACE DEFAULT "abc") - - ```sql - ALTER TABLE example_db.my_table - MODIFY COLUMN val1 VARCHAR(64) REPLACE DEFAULT "abc"; - ``` - - :::tip - You can only modify the column's data type; other attributes of the column must remain unchanged. - ::: - -3. Modify the length of a field in the Key column of the Duplicate key table - - ```sql - ALTER TABLE example_db.my_table - MODIFY COLUMN k3 VARCHAR(50) KEY NULL COMMENT 'to 50'; - ``` - -:::tip - - If you modify the value column in the aggregation model, you need to specify agg_type - - If you modify the key column for non-aggregate types, you need to specify the KEY keyword - - Only the type of the column can be modified, and other attributes of the column remain as they are (that is, other attributes need to be explicitly written in the statement according to the original attributes, see example 8) - - Partitioning and bucketing columns cannot be modified in any way - - The following types of conversions are currently supported (loss of precision is guaranteed by the user) - - Conversion of TINYINT/SMALLINT/INT/BIGINT/LARGEINT/FLOAT/DOUBLE types to larger numeric types - - Convert TINTINT/SMALLINT/INT/BIGINT/LARGEINT/FLOAT/DOUBLE/DECIMAL to VARCHAR - - VARCHAR supports modifying the maximum length - - VARCHAR/CHAR converted to TINTINT/SMALLINT/INT/BIGINT/LARGEINT/FLOAT/DOUBLE - - Convert VARCHAR/CHAR to DATE (currently supports "%Y-%m-%d", "%y-%m-%d", "%Y%m%d", "%y%m%d", "%Y/%m/%d, "%y/%m/%d" six formats) - - Convert DATETIME to DATE (only keep year-month-day information, for 
example: `2019-12-09 21:47:05` <--> `2019-12-09`) - - DATE is converted to DATETIME (hours, minutes and seconds are automatically filled with zeros, for example: `2019-12-09` <--> `2019-12-09 00:00:00`) - - Convert FLOAT to DOUBLE - - INT is converted to DATE (if the INT type data is illegal, the conversion fails, and the original data remains unchanged) - - All can be converted to STRING except DATE and DATETIME, but STRING cannot be converted to any other type -::: - -**5. Reorder the column at the specified index** - -**Grammar** - - ```sql - ALTER TABLE [database.]table table_name ORDER BY (column_name1, column_name2, ...) - [FROM rollup_index_name] - [PROPERTIES ("key"="value", ...)] - ``` - -**Example** - -1. Adjust the order of the key and value columns of example_db.my_table (non-aggregate model) - - ```sql - CREATE TABLE `my_table`( - `k_1` INT NULL, - `k_2` INT NULL, - `v_1` INT NULL, - `v_2` varchar NULL, - `v_3` varchar NULL - ) ENGINE=OLAP - DUPLICATE KEY(`k_1`, `k_2`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`k_1`) BUCKETS 5 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" - ); - - ALTER TABLE example_db.my_table ORDER BY (k_2,k_1,v_3,v_2,v_1); - - mysql> desc my_table; - +-------+------------+------+-------+---------+-------+ - | Field | Type | Null | Key | Default | Extra | - +-------+------------+------+-------+---------+-------+ - | k_2 | INT | Yes | true | NULL | | - | k_1 | INT | Yes | true | NULL | | - | v_3 | VARCHAR(*) | Yes | false | NULL | NONE | - | v_2 | VARCHAR(*) | Yes | false | NULL | NONE | - | v_1 | INT | Yes | false | NULL | NONE | - +-------+------------+------+-------+---------+-------+ - ``` - -2. 
Do two actions simultaneously - - ```sql - CREATE TABLE `my_table` ( - `k_1` INT NULL, - `k_2` INT NULL, - `v_1` INT NULL, - `v_2` varchar NULL, - `v_3` varchar NULL - ) ENGINE=OLAP - DUPLICATE KEY(`k_1`, `k_2`) - COMMENT 'OLAP' - DISTRIBUTED BY HASH(`k_1`) BUCKETS 5 - PROPERTIES ( - "replication_allocation" = "tag.location.default: 1" - ); - - ALTER TABLE example_db.my_table - ADD COLUMN col INT DEFAULT "0" AFTER v_1, - ORDER BY (k_2,k_1,v_3,v_2,v_1,col); - - mysql> desc my_table; - +-------+------------+------+-------+---------+-------+ - | Field | Type | Null | Key | Default | Extra | - +-------+------------+------+-------+---------+-------+ - | k_2 | INT | Yes | true | NULL | | - | k_1 | INT | Yes | true | NULL | | - | v_3 | VARCHAR(*) | Yes | false | NULL | NONE | - | v_2 | VARCHAR(*) | Yes | false | NULL | NONE | - | v_1 | INT | Yes | false | NULL | NONE | - | col | INT | Yes | false | 0 | NONE | - +-------+------------+------+-------+---------+-------+ - ``` - -:::tip - - All columns in index are written out - - the value column comes after the key column - - You can adjust the key column only within the range of the key column. The same applies to the value column -::: - -### Keywords - -```text -ALTER, TABLE, COLUMN, ALTER TABLE -``` - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-COMMENT.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-COMMENT.md deleted file mode 100644 index 4957ae25038f43..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-COMMENT.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -{ - "title": "ALTER-TABLE-COMMENT", - "language": "en" -} ---- - - - -## ALTER-TABLE-COMMENT - -### Name - -ALTER TABLE COMMENT - -### Description - -This statement is used to modify the comment of an existing table. The operation is synchronous, and the command returns to indicate completion. 
- -grammar: - -```sql -ALTER TABLE [database.]table alter_clause; -``` - -1. Modify table comment - -grammar: - -```sql -MODIFY COMMENT "new table comment"; -``` - -2. Modify column comment - -grammar: - -```sql -MODIFY COLUMN col1 COMMENT "new column comment"; -``` - -### Example - -1. Change the table1's comment to table1_comment - -```sql -ALTER TABLE table1 MODIFY COMMENT "table1_comment"; -``` - -2. Change the table1's col1 comment to table1_comment - -```sql -ALTER TABLE table1 MODIFY COLUMN col1 COMMENT "table1_col1_comment"; -``` - -### Keywords - -```text -ALTER, TABLE, COMMENT, ALTER TABLE -``` - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-PARTITION.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-PARTITION.md deleted file mode 100644 index dfc7ebdd331640..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-PARTITION.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -{ - "title": "ALTER-TABLE-PARTITION", - "language": "en" -} ---- - - - -## ALTER-TABLE-PARTITION - -### Name - -ALTER TABLE PARTITION - -### Description - -This statement is used to modify a table with a partition. - -This operation is synchronous, and the return of the command indicates the completion of the execution. - -grammar: - -```sql -ALTER TABLE [database.]table alter_clause; -``` - -The alter_clause of partition supports the following modification methods - -1. Add partition - -grammar: - -```sql -ADD PARTITION [IF NOT EXISTS] partition_name -partition_desc ["key"="value"] -[DISTRIBUTED BY HASH (k1[,k2 ...]) [BUCKETS num]] -``` - -Notice: - -- partition_desc supports the following two ways of writing - - VALUES LESS THAN [MAXVALUE|("value1", ...)] - - VALUES [("value1", ...), ("value1", ...)) -- The partition is left closed and right open. 
If the user only specifies the right boundary, the system will automatically determine the left boundary -- If the bucketing method is not specified, the bucketing method and bucket number used for creating the table would be automatically used -- If the bucketing method is specified, only the number of buckets can be modified, not the bucketing method or the bucketing column. If the bucketing method is specified but the number of buckets not be specified, the default value `10` will be used for bucket number instead of the number specified when the table is created. If the number of buckets modified, the bucketing method needs to be specified simultaneously. -- The ["key"="value"] section can set some attributes of the partition, see [CREATE TABLE](../Create/CREATE-TABLE.md) -- If the user does not explicitly create a partition when creating a table, adding a partition by ALTER is not supported -- If the user uses list partition then they can add default partition to the table. The default partition will store all data not satisfying prior partition key's constraints. - - ALTER TABLE table_name ADD PARTITION partition_name - -2. Delete the partition - -grammar: - -```sql -DROP PARTITION [IF EXISTS] partition_name [FORCE] -``` - - Notice: - -- At least one partition must be reserved for tables using partitioning. -- After executing DROP PARTITION for a period of time, the deleted partition can be recovered through the RECOVER statement. For details, see SQL Manual - Database Management - RECOVER Statement -- If you execute DROP PARTITION FORCE, the system will not check whether there are unfinished transactions in the partition, the partition will be deleted directly and cannot be recovered, this operation is generally not recommended - -3. Modify the partition properties - - grammar: - -```sql -MODIFY PARTITION p1|(p1[, p2, ...]) SET ("key" = "value", ...) 
-``` - -illustrate: - -- Currently supports modifying the following properties of partitions: - - storage_medium - -storage_cooldown_time - - replication_num - - in_memory -- For single-partition tables, partition_name is the same as the table name. - -### Example - -1. Add partition, existing partition [MIN, 2013-01-01), add partition [2013-01-01, 2014-01-01), use default bucketing method - -```sql -ALTER TABLE example_db.my_table -ADD PARTITION p1 VALUES LESS THAN ("2014-01-01"); -``` - -2. Increase the partition and use the new number of buckets - -```sql -ALTER TABLE example_db.my_table -ADD PARTITION p1 VALUES LESS THAN ("2015-01-01") -DISTRIBUTED BY HASH(k1) BUCKETS 20; -``` - -3. Increase the partition and use the new number of replicas - -```sql -ALTER TABLE example_db.my_table -ADD PARTITION p1 VALUES LESS THAN ("2015-01-01") -("replication_num"="1"); -``` - -4. Modify the number of partition replicas - -```sql -ALTER TABLE example_db.my_table -MODIFY PARTITION p1 SET("replication_num"="1"); -``` - -5. Batch modify the specified partition - -```sql -ALTER TABLE example_db.my_table -MODIFY PARTITION (p1, p2, p4) SET("replication_num"="1"); -``` - -6. Batch modify all partitions - -```sql -ALTER TABLE example_db.my_table -MODIFY PARTITION (*) SET("storage_medium"="HDD"); -``` - -7. Delete partition - -```sql -ALTER TABLE example_db.my_table -DROP PARTITION p1; -``` - -8. Batch delete partition - -```sql -ALTER TABLE example_db.my_table -DROP PARTITION p1, -DROP PARTITION p2, -DROP PARTITION p3; -``` - -9. 
Add a partition specifying upper and lower bounds - -```sql -ALTER TABLE example_db.my_table -ADD PARTITION p1 VALUES [("2014-01-01"), ("2014-02-01")); -``` - -### Keywords - -```text -ALTER, TABLE, PARTITION, ALTER TABLE -``` - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-PROPERTY.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-PROPERTY.md deleted file mode 100644 index 24e8bd917cce36..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-PROPERTY.md +++ /dev/null @@ -1,283 +0,0 @@ ---- -{ - "title": "ALTER-TABLE-PROPERTY", - "language": "en" -} ---- - - - -## ALTER-TABLE-PROPERTY - -### Name - -ALTER TABLE PROPERTY - -### Description - -This statement is used to modify the properties of an existing table. This operation is synchronous, and the return of the command indicates the completion of the execution. - -Modify the properties of the table, currently supports modifying the bloom filter column, the colocate_with attribute and the dynamic_partition attribute, the replication_num and default.replication_num. - -grammar: - -```sql -ALTER TABLE [database.]table alter_clause; -``` - -The alter_clause of property supports the following modification methods. - -Note: - -Can also be merged into the above schema change operation to modify, see the example below - -1. Modify the bloom filter column of the table - -```sql -ALTER TABLE example_db.my_table SET ("bloom_filter_columns"="k1,k2,k3"); -``` - -Can also be incorporated into the schema change operation above (note that the syntax for multiple clauses is slightly different) - -```sql -ALTER TABLE example_db.my_table -DROP COLUMN col2 -PROPERTIES ("bloom_filter_columns"="k1,k2,k3"); -``` - -2. Modify the Colocate property of the table - -```sql -ALTER TABLE example_db.my_table set ("colocate_with" = "t1"); -``` - -3. 
Change the bucketing method of the table from Hash Distribution to Random Distribution - -```sql -ALTER TABLE example_db.my_table set ("distribution_type" = "random"); -``` - -4. Modify the dynamic partition attribute of the table (support adding dynamic partition attribute to the table without dynamic partition attribute) - -```sql -ALTER TABLE example_db.my_table set ("dynamic_partition.enable" = "false"); -``` - -If you need to add dynamic partition attributes to tables without dynamic partition attributes, you need to specify all dynamic partition attributes - (Note: adding dynamic partition attributes is not supported for non-partitioned tables) - -```sql -ALTER TABLE example_db.my_table set ( - "dynamic_partition.enable" = "true", - "dynamic_partition.time_unit" = "DAY", - "dynamic_partition.end" = "3", - "dynamic_partition.prefix" = "p", - "dynamic_partition. buckets" = "32" -); -``` - -5. Modify the in_memory attribute of the table, only can set value 'false' - -```sql -ALTER TABLE example_db.my_table set ("in_memory" = "false"); -``` - -6. Enable batch delete function - -```sql -ALTER TABLE example_db.my_table ENABLE FEATURE "BATCH_DELETE"; -``` - -Note: - -- Only support unique tables -- Batch deletion is supported for old tables, while new tables are already supported when they are created - -7. Enable the function of ensuring the import order according to the value of the sequence column - -```sql -ALTER TABLE example_db.my_table ENABLE FEATURE "SEQUENCE_LOAD" WITH PROPERTIES ( - "function_column.sequence_type" = "Date" -); -``` - -Note: - -- Only support unique tables -- The sequence_type is used to specify the type of the sequence column, which can be integral and time type -- Only the orderliness of newly imported data is supported. Historical data cannot be changed - -8. 
Change the default number of buckets for the table to 50 - -```sql -ALTER TABLE example_db.my_table MODIFY DISTRIBUTION DISTRIBUTED BY HASH(k1) BUCKETS 50; -``` - -Note: - -- Only support non colocate table with RANGE partition and HASH distribution - -9. Modify table comments - -```sql -ALTER TABLE example_db.my_table MODIFY COMMENT "new comment"; -``` - -10. Modify column comments - -```sql -ALTER TABLE example_db.my_table MODIFY COLUMN k1 COMMENT "k1", MODIFY COLUMN k2 COMMENT "k2"; -``` - -11. Modify the engine type - -Only the MySQL type can be changed to the ODBC type. The value of driver is the name of the driver in the odbc.init configuration. - -```sql -ALTER TABLE example_db.mysql_table MODIFY ENGINE TO odbc PROPERTIES("driver" = "MySQL"); -``` - -12. Modify the number of copies - -```sql -ALTER TABLE example_db.mysql_table SET ("replication_num" = "2"); -ALTER TABLE example_db.mysql_table SET ("default.replication_num" = "2"); -ALTER TABLE example_db.mysql_table SET ("replication_allocation" = "tag.location.default: 1"); -ALTER TABLE example_db.mysql_table SET ("default.replication_allocation" = "tag.location.default: 1"); -```` - -Note: -1. The property with the default prefix indicates the default replica distribution for the modified table. This modification does not modify the current actual replica distribution of the table, but only affects the replica distribution of newly created partitions on the partitioned table. -2. For non-partitioned tables, modifying the replica distribution property without the default prefix will modify both the default replica distribution and the actual replica distribution of the table. That is, after the modification, through the `show create table` and `show partitions from tbl` statements, you can see that the replica distribution has been modified. -changed. -3. 
For partitioned tables, the actual replica distribution of the table is at the partition level, that is, each partition has its own replica distribution, which can be viewed through the `show partitions from tbl` statement. If you want to modify the actual replica distribution, see `ALTER TABLE PARTITION`. - -13\. **[Experimental]** turn on `light_schema_change` - - For tables that were not created with light_schema_change enabled, you can enable it by using the following statement. - -```sql -ALTER TABLE example_db.mysql_table SET ("light_schema_change" = "true"); -``` - -### Example - -1. Modify the bloom filter column of the table - -```sql -ALTER TABLE example_db.my_table SET ("bloom_filter_columns"="k1,k2,k3"); -``` - -Can also be incorporated into the schema change operation above (note that the syntax for multiple clauses is slightly different) - -```sql -ALTER TABLE example_db.my_table -DROP COLUMN col2 -PROPERTIES ("bloom_filter_columns"="k1,k2,k3"); -``` - -2. Modify the Colocate property of the table - -```sql -ALTER TABLE example_db.my_table set ("colocate_with" = "t1"); -``` - -3. Change the bucketing method of the table from Hash Distribution to Random Distribution - -```sql -ALTER TABLE example_db.my_table set ("distribution_type" = "random"); -``` - -4. Modify the dynamic partition attribute of the table (support adding dynamic partition attribute to the table without dynamic partition attribute) - -```sql -ALTER TABLE example_db.my_table set ("dynamic_partition.enable" = "false"); -``` - -If you need to add dynamic partition attributes to tables without dynamic partition attributes, you need to specify all dynamic partition attributes - (Note: adding dynamic partition attributes is not supported for non-partitioned tables) - -```sql -ALTER TABLE example_db.my_table set ("dynamic_partition.enable" = "true", "dynamic_partition.time_unit" = "DAY", "dynamic_partition.end" = "3", "dynamic_partition.prefix" = "p", "dynamic_partition. 
buckets" = "32"); -``` - -5. Modify the in_memory attribute of the table, only can set value 'false' - -```sql -ALTER TABLE example_db.my_table set ("in_memory" = "false"); -``` - -6. Enable batch delete function - -```sql -ALTER TABLE example_db.my_table ENABLE FEATURE "BATCH_DELETE"; -``` - -7. Enable the function of ensuring the import order according to the value of the sequence column - -```sql -ALTER TABLE example_db.my_table ENABLE FEATURE "SEQUENCE_LOAD" WITH PROPERTIES ("function_column.sequence_type" = "Date"); -``` - -8. Change the default number of buckets for the table to 50 - -```sql -ALTER TABLE example_db.my_table MODIFY DISTRIBUTION DISTRIBUTED BY HASH(k1) BUCKETS 50; -``` - -9. Modify table comments - -```sql -ALTER TABLE example_db.my_table MODIFY COMMENT "new comment"; -``` - -10. Modify column comments - -```sql -ALTER TABLE example_db.my_table MODIFY COLUMN k1 COMMENT "k1", MODIFY COLUMN k2 COMMENT "k2"; -``` - -11. Modify the engine type - -```sql -ALTER TABLE example_db.mysql_table MODIFY ENGINE TO odbc PROPERTIES("driver" = "MySQL"); -``` - -12. Add a cold and hot separation data migration strategy to the table -```sql - ALTER TABLE create_table_not_have_policy set ("storage_policy" = "created_create_table_alter_policy"); -``` -NOTE:The table can be successfully added only if it hasn't been associated with a storage policy. A table just can have one storage policy. - -13. Add a hot and cold data migration strategy to the partition of the table -```sql -ALTER TABLE create_table_partition MODIFY PARTITION (*) SET("storage_policy"="created_create_table_partition_alter_policy"); -``` -NOTE:The table's partition can be successfully added only if it hasn't been associated with a storage policy. A table just can have one storage policy. 
- - -### Keywords - -```text -ALTER, TABLE, PROPERTY, ALTER TABLE -``` - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-RENAME.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-RENAME.md deleted file mode 100644 index 2e176339cbb042..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-RENAME.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -{ - "title": "ALTER-TABLE-RENAME", - "language": "en" -} ---- - - - -## ALTER-TABLE-RENAME - -### Name - -ALTER TABLE RENAME - -### Description - -This statement is used to rename certain names of existing table properties. This operation is synchronous, and the return of the command indicates the completion of the execution. - -grammar: - -```sql -ALTER TABLE [database.]table alter_clause; -``` - -The alter_clause of rename supports modification of the following names - -1. Modify the table name - -grammar: - -```sql -RENAME new_table_name; -``` - -2. Modify the rollup index name - - grammar: - -```sql -RENAME ROLLUP old_rollup_name new_rollup_name; -``` - -3. Modify the partition name - -grammar: - -```sql -RENAME PARTITION old_partition_name new_partition_name; -``` - -4. Modify the column name - - - -Modify the column name - - - -grammar: - -```sql -RENAME COLUMN old_column_name new_column_name; -``` - -Notice: -- When creating a table, you need to set 'light_schema_change=true' in the property. - - -### Example - -1. Modify the table named table1 to table2 - -```sql -ALTER TABLE table1 RENAME table2; -``` - -2. Modify the rollup index named rollup1 in the table example_table to rollup2 - -```sql -ALTER TABLE example_table RENAME ROLLUP rollup1 rollup2; -``` - -3. Modify the partition named p1 in the table example_table to p2 - -```sql -ALTER TABLE example_table RENAME PARTITION p1 p2; -``` - -4. 
Modify the column named c1 in the table example_table to c2 - -```sql -ALTER TABLE example_table RENAME COLUMN c1 c2; -``` - -### Keywords - -```text -ALTER, TABLE, RENAME, ALTER TABLE -``` - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-REPLACE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-REPLACE.md deleted file mode 100644 index 069e740b662338..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-REPLACE.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -{ - "title": "ALTER-TABLE-REPLACE", - "language": "en" -} ---- - - - -## ALTER-TABLE-REPLACE - -### Name - -ALTER TABLE REPLACE - -### Description - -Atomic substitution of two tables. This operation applies only to OLAP tables. - -```sql -ALTER TABLE [db.]tbl1 REPLACE WITH TABLE tbl2 -[PROPERTIES('swap' = 'true')]; -``` - -Replace table tbl1 with table tbl2. - -If the `swap` parameter is `true`, the data in the table named `tbl1` will be the data in the original table named `tbl2` after the replacement. The data in the table named `tbl2` is the data in the original `tbl1` table. That is, two tables of data have been swapped. - -If the `swap` parameter is `false`, the data in the `tbl1` table will be the data in the `tbl2` table after the replacement. The table named `tbl2` is deleted. - -#### Theory - -The replace table function actually turns the following set of operations into an atomic operation. - -If you want to replace table A with table B and `swap` is `true`, do the following: - -1. Rename table B as table A. -2. Rename table A as table B. - -If `swap` is `false`, do as follows: - -1. Delete table A. -2. Rename table B as table A. - -#### Notice -1. The default `swap` parameter is `true`. That is, a table replacement operation is equivalent to an exchange of data between two tables. -2. 
If the `swap` parameter is set to false, the replaced table (table A) will be deleted and cannot be restored. -3. The replacement operation can only occur between two OLAP tables and does not check whether the table structure of the two tables is consistent. -4. The original permission Settings are not changed. Because the permission check is based on the table name. - -### Example - -1. Atomic swap `tbl1` with `tbl2` without dropping any tables(Note: if you delete it, you actually delete tbl1 and rename tbl2 to tbl1.) - -```sql -ALTER TABLE tbl1 REPLACE WITH TABLE tbl2; -``` -or -```sql -ALTER TABLE tbl1 REPLACE WITH TABLE tbl2 PROPERTIES('swap' = 'true'); -``` - -2. Atomic swap `tbl1` with `tbl2` and deleting the `tbl2` table(Keep `tbl1` and the data of the original `tbl2`) - -```sql -ALTER TABLE tbl1 REPLACE WITH TABLE tbl2 PROPERTIES('swap' = 'false'); -``` - - - -### Keywords - -```text -ALTER, TABLE, REPLACE, ALTER TABLE -``` - -### Best Practice -1. Atomic overlay write operations - - In some cases, the user wants to be able to rewrite the data of a certain table, but if the data is deleted first and then imported, the data cannot be viewed for a period of time in between. At this time, the user can first use the `CREATE TABLE LIKE` statement to create a new table with the same structure, import the new data into the new table, and use the replacement operation to atomically replace the old table to achieve the goal. Atomic overwrite write operations at the partition level, see [temp partition documentation](../../../../advanced/partition/table-temp-partition.md). 
diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-ROLLUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-ROLLUP.md deleted file mode 100644 index 45c4c34bb90925..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-TABLE-ROLLUP.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -{ - "title": "ALTER-TABLE-ROLLUP", - "language": "en" -} ---- - - - -## ALTER-TABLE-ROLLUP - -### Name - -ALTER TABLE ROLLUP - -### Description - -This statement is used to perform a rollup modification operation on an existing table. The rollup is an asynchronous operation, and the task is returned when the task is submitted successfully. After that, you can use the [SHOW ALTER](../../Show-Statements/SHOW-ALTER.md) command to view the progress. - -grammar: - -```sql -ALTER TABLE [database.]table alter_clause; -``` - -The alter_clause of rollup supports the following creation methods - -1. Create a rollup index - -grammar: - -```sql -ADD ROLLUP rollup_name (column_name1, column_name2, ...) -[FROM from_index_name] -[PROPERTIES ("key"="value", ...)] -``` - -properties: Support setting timeout time, the default timeout time is 1 day. - -2. Create rollup indexes in batches - -grammar: - -```sql -ADD ROLLUP [rollup_name (column_name1, column_name2, ...) - [FROM from_index_name] - [PROPERTIES ("key"="value", ...)],...] -``` - -Notice: - -- If from_index_name is not specified, it will be created from base index by default -- Columns in rollup table must be columns already in from_index -- In properties, the storage format can be specified. For details, see [CREATE TABLE](../Create/CREATE-TABLE.md) - -3. Delete rollup index - - grammar: - -```sql -DROP ROLLUP rollup_name [PROPERTIES ("key"="value", ...)] -``` - -4. Batch delete rollup index - -grammar: - -```sql -DROP ROLLUP [rollup_name [PROPERTIES ("key"="value", ...)],...] 
-``` - -Notice: - -- cannot delete base index - -### Example - -1. Create index: example_rollup_index, based on base index (k1,k2,k3,v1,v2). Columnar storage. - -```sql -ALTER TABLE example_db.my_table -ADD ROLLUP example_rollup_index(k1, k3, v1, v2); -``` - -2. Create index: example_rollup_index2, based on example_rollup_index (k1,k3,v1,v2) - -```sql -ALTER TABLE example_db.my_table -ADD ROLLUP example_rollup_index2 (k1, v1) -FROM example_rollup_index; -``` - -3. Create index: example_rollup_index3, based on base index (k1,k2,k3,v1), with a custom rollup timeout of one hour. - -```sql -ALTER TABLE example_db.my_table -ADD ROLLUP example_rollup_index(k1, k3, v1) -PROPERTIES("timeout" = "3600"); -``` - -4. Delete index: example_rollup_index2 - -```sql -ALTER TABLE example_db.my_table -DROP ROLLUP example_rollup_index2; -``` - -5. Batch Delete rollup index - -```sql -ALTER TABLE example_db.my_table -DROP ROLLUP example_rollup_index2,example_rollup_index3; -``` - -### - -4. Keywords - -```text -ALTER, TABLE, ROLLUP, ALTER TABLE -``` - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-VIEW.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-VIEW.md deleted file mode 100644 index b471b034e16c45..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-VIEW.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -{ - "title": "ALTER-VIEW", - "language": "en" -} ---- - - - -## ALTER-VIEW - -### Name - -ALTER VIEW - -### Description - -This statement is used to modify the definition of a view - -grammar: - -```sql -ALTER VIEW -[db_name.]view_name -(column1[ COMMENT "col comment"][, column2, ...]) -AS query_stmt -``` - -illustrate: - -- Views are all logical, and the data in them will not be stored on physical media. When querying, the view will be used as a subquery in the statement. 
Therefore, modifying the definition of the view is equivalent to modifying query_stmt. -- query_stmt is any supported SQL - -### Example - -1. Modify the view example_view on example_db - -```sql -ALTER VIEW example_db.example_view -( -c1 COMMENT "column 1", -c2 COMMENT "column 2", -c3 COMMENT "column 3" -) -AS SELECT k1, k2, SUM(v1) FROM example_table -GROUP BY k1, k2 -``` - -### Keywords - -```text -ALTER, VIEW -``` - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-WORKLOAD-GROUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-WORKLOAD-GROUP.md deleted file mode 100644 index c46044b9beec99..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/ALTER-WORKLOAD-GROUP.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -{ -"title": "ALTER-WORKLOAD-GROUP", -"language": "en" -} ---- - - - -## ALTER-WORKLOAD-GROUP - -### Name - -ALTER WORKLOAD GROUP - - - -### Description - -This statement is used to modify the workload group. - -Syntax: - -```sql -ALTER WORKLOAD GROUP "rg_name" -PROPERTIES ( - property_list -); -``` - -NOTE: - -* Modify the memory_limit property in such a way that the sum of all memory_limit values does not exceed 100%; -* Support modifying some properties, for example, if only cpu_share is modified, just fill in cpu_share in properties. - -### Example - -1. 
Modify the workload group named g1: - - ```sql - alter workload group g1 - properties ( - "cpu_share"="30", - "memory_limit"="30%" - ); - ``` - -### Keywords - -```sql -ALTER, WORKLOAD, GROUP -``` - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/CANCEL-ALTER-TABLE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/CANCEL-ALTER-TABLE.md deleted file mode 100644 index ba825fbc4c29b8..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/CANCEL-ALTER-TABLE.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -{ - "title": "CANCEL-ALTER-TABLE", - "language": "en" -} ---- - - - -## CANCEL-ALTER-TABLE - -### Name - -CANCEL ALTER TABLE - -### Description - -This statement is used to undo an ALTER operation. - -1. Undo the ALTER TABLE COLUMN operation - -grammar: - -```sql -CANCEL ALTER TABLE COLUMN -FROM db_name.table_name -``` - -2. Undo the ALTER TABLE ROLLUP operation - -grammar: - -```sql -CANCEL ALTER TABLE ROLLUP -FROM db_name.table_name -``` - -3. Batch cancel rollup operations based on job id - -grammar: - -```sql -CANCEL ALTER TABLE ROLLUP -FROM db_name.table_name (jobid,...) -``` - -Notice: - -- This command is an asynchronous operation. You need to use `show alter table rollup` to check the task status to confirm whether the execution is successful or not. - -4. Undo the ALTER CLUSTER operation - -grammar: - -``` -(To be implemented...) -``` - -### Example - -1. Undo the ALTER COLUMN operation on my_table. - - [CANCEL ALTER TABLE COLUMN] - -```sql -CANCEL ALTER TABLE COLUMN -FROM example_db.my_table; -``` - -1. Undo the ADD ROLLUP operation under my_table. - - [CANCEL ALTER TABLE ROLLUP] - -```sql -CANCEL ALTER TABLE ROLLUP -FROM example_db.my_table; -``` - -1. Undo the ADD ROLLUP operation under my_table according to the job id. 
- - [CANCEL ALTER TABLE ROLLUP] - -```sql -CANCEL ALTER TABLE ROLLUP -FROM example_db.my_table(12801,12802); -``` - -### Keywords - - CANCEL, ALTER, TABLE, CANCEL ALTER - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/PAUSE-JOB.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/PAUSE-JOB.md deleted file mode 100644 index 5920c14398a7da..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/PAUSE-JOB.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -{ -"title": "PAUSE-JOB", -"language": "en" -} ---- - - - -## PAUSE-JOB - -### Name - -PAUSE JOB - -### Description - -User suspends a JOB. A stopped job can be resumed with RESUME JOB. - -```sql -PAUSE JOB where jobName = 'job_name'; -``` - -### Example - -1. Pause the job named test1. - - ```sql - PAUSE JOB where jobName = 'test1'; - ``` - -### Keywords - - PAUSE, JOB - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/RESUME-JOB.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/RESUME-JOB.md deleted file mode 100644 index 69608f1416b0b5..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Alter/RESUME-JOB.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ -"title": "RESUME-JOB", -"language": "en" -} ---- - - - -## RESUME-JOB - -### Name - -RESUME JOB - -### Description - -Used to restart a JOB in PAUSE state. The restarted job will continue to be executed periodically. A JOB in STOP state cannot be resumed. - -```sql -RESUME JOB WHERE jobName = 'job_name'; -``` - -### Example - -1. Restart the JOB named test1. 
- - ```sql - RESUME JOB WHERE jobName = 'test1'; - ``` - -### Keywords - - RESUME, JOB - -### Best Practice - - - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md deleted file mode 100644 index ef5f7a286c9b82..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -{ - "title": "BACKUP", - "language": "en" -} ---- - - - -## BACKUP - -### Name - -BACKUP - -### Description - -This statement is used to back up the data under the specified database. This command is an asynchronous operation. After the submission is successful, you need to check the progress through the SHOW BACKUP command. Only backing up tables of type OLAP is supported. - - Only root or superuser users can create repositories. - -grammar: - -```sql -BACKUP SNAPSHOT [db_name].{snapshot_name} -TO `repository_name` -[ON|EXCLUDE] ( - `table_name` [PARTITION (`p1`, ...)], - ... -) -PROPERTIES ("key"="value", ...); -``` - -illustrate: - -- There can only be one executing BACKUP or RESTORE task under the same database. -- The ON clause identifies the tables and partitions that need to be backed up. If no partition is specified, all partitions of the table are backed up by default -- Tables and partitions that do not require backup are identified in the EXCLUDE clause. Back up all partition data for all tables in this database except the specified table or partition. -- PROPERTIES currently supports the following properties: - - "type" = "full": indicates that this is a full update (default) - - "timeout" = "3600": The task timeout period, the default is one day. in seconds. - -### Example - -1. 
Fully backup the table example_tbl under example_db to the warehouse example_repo: - -```sql -BACKUP SNAPSHOT example_db.snapshot_label1 -TO example_repo -ON (example_tbl) -PROPERTIES ("type" = "full"); -``` - -2. Under the full backup example_db, the p1, p2 partitions of the table example_tbl, and the table example_tbl2 to the warehouse example_repo: - -```sql -BACKUP SNAPSHOT example_db.snapshot_label2 -TO example_repo -ON -( - example_tbl PARTITION (p1,p2), - example_tbl2 -); -``` - -3. Full backup of all tables except table example_tbl under example_db to warehouse example_repo: - -```sql -BACKUP SNAPSHOT example_db.snapshot_label3 -TO example_repo -EXCLUDE (example_tbl); -``` - -4. Fully back up tables under example_db to the repository example_repo: - -```sql -BACKUP SNAPSHOT example_db.snapshot_label3 -TO example_repo; -``` - -### Keywords - -BACKUP - -### Best Practice - -1. Only one backup operation can be performed under the same database. - -2. The backup operation will back up the underlying table and [materialized view](../../../../query-acceleration/materialized-view.md) of the specified table or partition, and only one copy will be backed up. - -3. Efficiency of backup operations - - The efficiency of backup operations depends on the amount of data, the number of Compute Nodes, and the number of files. Each Compute Node where the backup data shard is located will participate in the upload phase of the backup operation. The greater the number of nodes, the higher the upload efficiency. - - The amount of file data refers only to the number of shards, and the number of files in each shard. If there are many shards, or there are many small files in the shards, the backup operation time may be increased. 
diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CANCEL-BACKUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CANCEL-BACKUP.md deleted file mode 100644 index 9ecbc9b9c25cfa..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CANCEL-BACKUP.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -{ - "title": "CANCEL-BACKUP", - "language": "en" -} ---- - - - -## CANCEL-BACKUP - -### Name - -CANCEL BACKUP - -### Description - -This statement is used to cancel an ongoing BACKUP task. - -grammar: - -```sql -CANCEL BACKUP FROM db_name; -``` - -### Example - -1. Cancel the BACKUP task under example_db. - -```sql -CANCEL BACKUP FROM example_db; -``` - -### Keywords - - CANCEL, BACKUP - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CANCEL-RESTORE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CANCEL-RESTORE.md deleted file mode 100644 index fbc0e9d315c2d8..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CANCEL-RESTORE.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -{ - "title": "CANCEL-RESTORE", - "language": "en" -} ---- - - - -## CANCEL-RESTORE - -### Name - -CANCEL RESTORE - -### Description - -This statement is used to cancel an ongoing RESTORE task. - -grammar: - -```sql -CANCEL RESTORE FROM db_name; -``` - -Notice: - -- When cancellation is around a COMMIT or later stage of recovery, the table being recovered may be rendered inaccessible. At this time, data recovery can only be performed by executing the recovery job again. - -### Example - -1. Cancel the RESTORE task under example_db. 
- -```sql -CANCEL RESTORE FROM example_db; -``` - -### Keywords - - CANCEL, RESTORE - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md deleted file mode 100644 index f5571c4c441ea2..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -{ - "title": "CREATE-REPOSITORY", - "language": "en" -} ---- - - - -## CREATE-REPOSITORY - -### Name - -CREATE REPOSITORY - -### Description - -This statement is used to create a repository. Repositories are used for backup or restore. Only root or superuser users can create repositories. - -grammar: - -```sql -CREATE [READ ONLY] REPOSITORY `repo_name` -WITH [BROKER `broker_name`|S3|hdfs] -ON LOCATION `repo_location` -PROPERTIES ("key"="value", ...); -``` - -illustrate: - -- Creation of repositories, relying on existing brokers or accessing cloud storage directly through AWS s3 protocol, or accessing HDFS directly. -- If it is a read-only repository, restores can only be done on the repository. If not, backup and restore operations are available. -- PROPERTIES are different according to different types of broker or S3 or hdfs, see the example for details. -- ON LOCATION : if it is S3 , here followed by the Bucket Name. - -### Example - -1. Create a warehouse named bos_repo, rely on BOS broker "bos_broker", and the data root directory is: bos://palo_backup - -```sql -CREATE REPOSITORY `bos_repo` -WITH BROKER `bos_broker` -ON LOCATION "bos://palo_backup" -PROPERTIES -( - "bos_endpoint" = "http://gz.bcebos.com", - "bos_accesskey" = "bos_accesskey", - "bos_secret_accesskey"="bos_secret_accesskey" -); -``` - -2. 
Create the same repository as Example 1, but with read-only properties: - -```sql -CREATE READ ONLY REPOSITORY `bos_repo` -WITH BROKER `bos_broker` -ON LOCATION "bos://palo_backup" -PROPERTIES -( - "bos_endpoint" = "http://gz.bcebos.com", - "bos_accesskey" = "bos_accesskey", - "bos_secret_accesskey"="bos_accesskey" -); -``` - -3. Create a warehouse named hdfs_repo, rely on Baidu hdfs broker "hdfs_broker", the data root directory is: hdfs://hadoop-name-node:54310/path/to/repo/ - -```sql -CREATE REPOSITORY `hdfs_repo` -WITH BROKER `hdfs_broker` -ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/" -PROPERTIES -( - "username" = "user", - "password" = "password" -); -``` - -4. Create a repository named s3_repo to link cloud storage directly without going through the broker. - -```sql -CREATE REPOSITORY `s3_repo` -WITH S3 -ON LOCATION "s3://s3-repo" -PROPERTIES -( - "s3.endpoint" = "http://s3-REGION.amazonaws.com", - "s3.region" = "s3-REGION", - "s3.access_key" = "AWS_ACCESS_KEY", - "s3.secret_key"="AWS_SECRET_KEY", - "s3.region" = "REGION" -); -``` - -5. Create a repository named hdfs_repo to link HDFS directly without going through the broker. - -```sql -CREATE REPOSITORY `hdfs_repo` -WITH hdfs -ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/" -PROPERTIES -( - "fs.defaultFS"="hdfs://hadoop-name-node:54310", - "hadoop.username"="user" -); - -### Keywords - -``` -6. Create a repository named minio_repo to link minio storage directly through the s3 protocol. - -``` -CREATE REPOSITORY `minio_repo` -WITH S3 -ON LOCATION "s3://minio_repo" -PROPERTIES -( - "s3.endpoint" = "http://minio.com", - "s3.access_key" = "MINIO_USER", - "s3.secret_key"="MINIO_PASSWORD", - "s3.region" = "REGION" - "use_path_style" = "true" -); -``` - - -7. Create a repository named minio_repo via temporary security credentials. 
- - - -``` -CREATE REPOSITORY `minio_repo` -WITH S3 -ON LOCATION "s3://minio_repo" -PROPERTIES -( - "s3.endpoint" = "AWS_ENDPOINT", - "s3.access_key" = "AWS_TEMP_ACCESS_KEY", - "s3.secret_key" = "AWS_TEMP_SECRET_KEY", - "s3.session_token" = "AWS_TEMP_TOKEN", - "s3.region" = "AWS_REGION" -) -``` - -8. Create repository using Tencent COS - -``` -CREATE REPOSITORY `cos_repo` -WITH S3 -ON LOCATION "s3://backet1/" -PROPERTIES -( - "s3.access_key" = "ak", - "s3.secret_key" = "sk", - "s3.endpoint" = "http://cos.ap-beijing.myqcloud.com", - "s3.region" = "ap-beijing" -); -``` - -9. Create repository and delete snapshots if exists. - -```sql -CREATE REPOSITORY `s3_repo` -WITH S3 -ON LOCATION "s3://s3-repo" -PROPERTIES -( - "s3.endpoint" = "http://s3-REGION.amazonaws.com", - "s3.region" = "s3-REGION", - "s3.access_key" = "AWS_ACCESS_KEY", - "s3.secret_key"="AWS_SECRET_KEY", - "s3.region" = "REGION", - "delete_if_exists" = "true" -); -``` - -Note: only the s3 service supports the "delete_if_exists" property. - -### Keywords - - CREATE, REPOSITORY - -### Best Practice - -1. A cluster can create multiple warehouses. Only users with ADMIN privileges can create repositories. -2. Any user can view the created repositories through the [SHOW REPOSITORIES](../../Show-Statements/SHOW-REPOSITORIES.md) command. -3. When performing data migration operations, it is necessary to create the exact same warehouse in the source cluster and the destination cluster, so that the destination cluster can view the data snapshots backed up by the source cluster through this warehouse. 
diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/DROP-REPOSITORY.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/DROP-REPOSITORY.md deleted file mode 100644 index 6a7e6e5a66a1dd..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/DROP-REPOSITORY.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -{ - "title": "DROP-REPOSITORY", - "language": "en" -} ---- - - - -## DROP-REPOSITORY - -### Name - -DROP REPOSITORY - -### Description - -This statement is used to delete a created repository. Only root or superuser users can delete repositories. - -grammar: - -```sql -DROP REPOSITORY `repo_name`; -``` - -illustrate: - -- Deleting a warehouse just deletes the warehouse's mapping in Palo, not the actual warehouse data. Once deleted, it can be mapped to the repository again by specifying the same broker and LOCATION. - -### Example - -1. Delete the repository named bos_repo: - -```sql -DROP REPOSITORY `bos_repo`; -``` - -### Keywords - - DROP, REPOSITORY - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/RESTORE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/RESTORE.md deleted file mode 100644 index d176816a3cdbda..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/RESTORE.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -{ - "title": "RESTORE", - "language": "en" -} ---- - - - -## RESTORE - -### Name - -RESTORE - -### Description - -This statement is used to restore the data backed up by the BACKUP command to the specified database. This command is an asynchronous operation. After the submission is successful, you need to check the progress through the SHOW RESTORE command. Restoring tables of type OLAP is only supported. 
- -grammar: - -```sql -RESTORE SNAPSHOT [db_name].{snapshot_name} -FROM `repository_name` -[ON|EXCLUDE] ( - `table_name` [PARTITION (`p1`, ...)] [AS `tbl_alias`], - ... -) -PROPERTIES ("key"="value", ...); -``` - -illustrate: - -- There can only be one executing BACKUP or RESTORE task under the same database. -- The tables and partitions that need to be restored are identified in the ON clause. If no partition is specified, all partitions of the table are restored by default. The specified table and partition must already exist in the warehouse backup. -- Tables and partitions that do not require recovery are identified in the EXCLUDE clause. All partitions of all other tables in the warehouse except the specified table or partition will be restored. -- The table name backed up in the warehouse can be restored to a new table through the AS statement. But the new table name cannot already exist in the database. The partition name cannot be modified. -- You can restore the backed up tables in the warehouse to replace the existing tables of the same name in the database, but you must ensure that the table structures of the two tables are exactly the same. The table structure includes: table name, column, partition, Rollup, etc. -- You can specify some partitions of the recovery table, and the system will check whether the partition Range or List can match. -- PROPERTIES currently supports the following properties: - - "backup_timestamp" = "2018-05-04-16-45-08": Specifies which time version of the corresponding backup to restore, required. This information can be obtained with the `SHOW SNAPSHOT ON repo;` statement. - - "replication_num" = "3": Specifies the number of replicas for the restored table or partition. Default is 3. If restoring an existing table or partition, the number of replicas must be the same as the number of replicas of the existing table or partition. At the same time, there must be enough hosts to accommodate multiple replicas. 
- - "reserve_replica" = "true": Default is false. When this property is true, the replication_num property is ignored and the restored table or partition will have the same number of replication as before the backup. Supports multiple tables or multiple partitions within a table with different replication number. - - "reserve_dynamic_partition_enable" = "true": Default is false. When this property is true, the restored table will have the same value of 'dynamic_partition_enable' as before the backup. if this property is not true, the restored table will set 'dynamic_partition_enable=false'. - - "timeout" = "3600": The task timeout period, the default is one day. in seconds. - - "meta_version" = 40: Use the specified meta_version to read the previously backed up metadata. Note that this parameter is used as a temporary solution and is only used to restore the data backed up by the old version of Doris. The latest version of the backup data already contains the meta version, no need to specify it. - -### Example - -1. Restore the table backup_tbl in backup snapshot_1 from example_repo to database example_db1, the time version is "2018-05-04-16-45-08". Revert to 1 copy: - -```sql -RESTORE SNAPSHOT example_db1.`snapshot_1` -FROM `example_repo` -ON ( `backup_tbl` ) -PROPERTIES -( - "backup_timestamp"="2018-05-04-16-45-08", - "replication_num" = "1" -); -``` - -2. Restore partitions p1, p2 of table backup_tbl in backup snapshot_2 from example_repo, and table backup_tbl2 to database example_db1, rename it to new_tbl, and the time version is "2018-05-04-17-11-01". The default reverts to 3 replicas: - -```sql -RESTORE SNAPSHOT example_db1.`snapshot_2` -FROM `example_repo` -ON -( - `backup_tbl` PARTITION (`p1`, `p2`), - `backup_tbl2` AS `new_tbl` -) -PROPERTIES -( - "backup_timestamp"="2018-05-04-17-11-01" -); -``` - -3. Restore all tables except for table backup_tbl in backup snapshot_3 from example_repo to database example_db1, the time version is "2018-05-04-18-12-18". 
- -```sql -RESTORE SNAPSHOT example_db1.`snapshot_3` -FROM `example_repo` -EXCLUDE ( `backup_tbl` ) -PROPERTIES -( - "backup_timestamp"="2018-05-04-18-12-18" -); -``` - -### Keywords - -``` -RESTORE -``` - -### Best Practice - -1. There can only be one ongoing recovery operation under the same database. - -2. The table backed up in the warehouse can be restored and replaced with the existing table of the same name in the database, but the table structure of the two tables must be completely consistent. The table structure includes: table name, columns, partitions, materialized views, and so on. - -3. When specifying a partial partition of the recovery table, the system will check whether the partition range can match. - -4. Efficiency of recovery operations: - - In the case of the same cluster size, the time-consuming of the restore operation is basically the same as the time-consuming of the backup operation. If you want to speed up the recovery operation, you can first restore only one copy by setting the `replication_num` parameter, and then adjust the number of copies by [ALTER TABLE PROPERTY](../../Data-Definition-Statements/Alter/ALTER-TABLE-PROPERTY.md), complete the copy. diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-ASYNC-MATERIALIZED-VIEW.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-ASYNC-MATERIALIZED-VIEW.md deleted file mode 100644 index cf21b31a747a5a..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-ASYNC-MATERIALIZED-VIEW.md +++ /dev/null @@ -1,231 +0,0 @@ ---- -{ - "title": "CREATE-ASYNC-MATERIALIZED-VIEW", - "language": "en" -} ---- - - - -## CREATE-ASYNC-MATERIALIZED-VIEW - -### Name - -CREATE ASYNC MATERIALIZED VIEW - -### Description - -This statement is used to create an asynchronous materialized view. - -#### syntax - -```sql -CREATE MATERIALIZED VIEW (IF NOT EXISTS)? 
mvName=multipartIdentifier - (LEFT_PAREN cols=simpleColumnDefs RIGHT_PAREN)? buildMode? - (REFRESH refreshMethod? refreshTrigger?)? - (KEY keys=identifierList)? - (COMMENT STRING_LITERAL)? - (PARTITION BY LEFT_PAREN partitionKey = identifier RIGHT_PAREN)? - (DISTRIBUTED BY (HASH hashKeys=identifierList | RANDOM) (BUCKETS (INTEGER_VALUE | AUTO))?)? - propertyClause? - AS query -``` - -#### illustrate - -##### simpleColumnDefs - -Used to define the materialized view column information, if not defined, it will be automatically derived - -```sql -simpleColumnDefs -: cols+=simpleColumnDef (COMMA cols+=simpleColumnDef)* - ; - -simpleColumnDef -: colName=identifier (COMMENT comment=STRING_LITERAL)? - ; -``` - -For example, define two columns aa and bb, where the annotation for aa is "name" -```sql -CREATE MATERIALIZED VIEW mv1 -(aa comment "name",bb) -``` - -##### buildMode - -Used to define whether the materialized view is refreshed immediately after creation, default to IMMEDIATE - -IMMEDIATE:Refresh Now - -DEFERRED:Delay refresh - -```sql -buildMode -: BUILD (IMMEDIATE | DEFERRED) -; -``` - -For example, specifying the materialized view to refresh immediately - -```sql -CREATE MATERIALIZED VIEW mv1 -BUILD IMMEDIATE -``` - -##### refreshMethod - -Used to define the refresh method for materialized views, default to AUTO - -COMPLETE:Full refresh - -AUTO:Try to refresh incrementally as much as possible. If incremental refresh is not possible, refresh in full - -```sql -refreshMethod -: COMPLETE | AUTO -; -``` - -For example, specifying full refresh of materialized views -```sql -CREATE MATERIALIZED VIEW mv1 -REFRESH COMPLETE -``` - -##### refreshTrigger - -Trigger method for refreshing data in materialized views, default to MANUAL - -MANUAL:Manual refresh - -SCHEDULE:Timed refresh - -```sql -refreshTrigger -: ON MANUAL -| ON SCHEDULE refreshSchedule -; - -refreshSchedule -: EVERY INTEGER_VALUE mvRefreshUnit (STARTS STRING_LITERAL)? 
-; - -mvRefreshUnit -: MINUTE | HOUR | DAY | WEEK -; -``` - -For example: executed every 2 hours, starting from 21:07:09 on December 13, 2023 -```sql -CREATE MATERIALIZED VIEW mv1 -REFRESH ON SCHEDULE EVERY 2 HOUR STARTS "2023-12-13 21:07:09" -``` - -##### key -The materialized view is the DUPLICATE KEY model, therefore the specified columns are arranged in sequence - -```sql -identifierList -: LEFT_PAREN identifierSeq RIGHT_PAREN - ; - -identifierSeq -: ident+=errorCapturingIdentifier (COMMA ident+=errorCapturingIdentifier)* -; -``` - -For example, specifying k1 and k2 as sorting sequences -```sql -CREATE MATERIALIZED VIEW mv1 -KEY(k1,k2) -``` - -##### partition -There are two types of partitioning methods for materialized views. If no partitioning is specified, there will be a default single partition. If a partitioning field is specified, the system will automatically deduce the source base table of that field and synchronize all partitions of the base table (currently supporting `OlapTable` and `hive`). (Limitation: If the base table is an `OlapTable`, it can only have one partition field) - -For example, if the base table is a range partition with a partition field of `create_time` and partitioning by day, and `partition by(ct) as select create_time as ct from t1` is specified when creating a materialized view, -then the materialized view will also be a range partition with a partition field of 'ct' and partitioning by day - -The selection of partition fields and the definition of materialized views must meet the following constraints to be successfully created; -otherwise, an error "Unable to find a suitable base table for partitioning" will occur: - -- At least one of the base tables used by the materialized view must be a partitioned table. -- Partitioned tables used by the materialized view must employ list or range partitioning strategies. -- The top-level partition column in the materialized view can only have one partition field. 
-- The SQL of the materialized view needs to use partition columns from the base table. -- If GROUP BY is used, the partition column fields must be after the GROUP BY. -- If window functions are used, the partition column fields must be after the PARTITION BY. -- Data changes should occur on partitioned tables. If they occur on non-partitioned tables, the materialized view needs to be fully rebuilt. -- Using the fields that generate nulls in the JOIN as partition fields in the materialized view prohibits partition incremental updates. -- The base table partition table referenced by the materialized view currently only supports internal tables and HIVE tables. The attribute of the partition column of the inner table cannot be NULL. The HIVE table allows NULL. - - -#### property -The materialized view can specify both the properties of the table and the properties unique to the materialized view. - -The properties unique to materialized views include: - -`grace_period`: When performing query rewrites, there is a maximum allowed delay time (measured in seconds) for the data of the materialized view. If there is a discrepancy between the data of partition A and the base table, and the last refresh time of partition A of the materialized view was 1, while the current system time is 2, then this partition will not undergo transparent rewriting. However, if the grace_period is greater than or equal to 1, this partition will be used for transparent rewriting. - -`excluded_trigger_tables`: Table names ignored during data refresh, separated by commas. For example, ` table1, table2` - -`refresh_partition_num`: The number of partitions refreshed by a single insert statement is set to 1 by default. When refreshing a materialized view, the system first calculates the list of partitions to be refreshed and then splits it into multiple insert statements that are executed in sequence according to this configuration. 
If any insert statement fails, the entire task will stop executing. The materialized view ensures the transactionality of individual insert statements, meaning that failed insert statements will not affect partitions that have already been successfully refreshed. - -`workload_group`: The name of the workload_group used by the materialized view when performing refresh tasks. This is used to limit the resources used for refreshing data in the materialized view, in order to avoid affecting the operation of other business processes. For details on how to create and use workload_group, refer to [WORKLOAD-GROUP](../../../../admin-manual/workload-group.md) - -##### query - -Create a query statement for the materialized view, and the result is the data in the materialized view - -Random functions are not supported, for example: -```sql -SELECT random() as dd,k3 FROM user -``` - -### Example - -1. Create a materialized view mv1 that refreshes immediately and then once a week, with the data source being the hive catalog - - ```sql - CREATE MATERIALIZED VIEW mv1 BUILD IMMEDIATE REFRESH COMPLETE ON SCHEDULE EVERY 1 WEEK - DISTRIBUTED BY RANDOM BUCKETS 2 - PROPERTIES ( - "replication_num" = "1" - ) - AS SELECT * FROM hive_catalog.db1.user; - ``` - -2. 
Create a materialized view with multiple table joins - - ```sql - CREATE MATERIALIZED VIEW mv1 BUILD IMMEDIATE REFRESH COMPLETE ON SCHEDULE EVERY 1 WEEK - DISTRIBUTED BY RANDOM BUCKETS 2 - PROPERTIES ( - "replication_num" = "1" - ) - AS select user.k1,user.k3,com.k4 from user join com on user.k1=com.k1; - ``` - -### Keywords - - CREATE, ASYNC, MATERIALIZED, VIEW - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md deleted file mode 100644 index c3d59ccc4a3280..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-CATALOG.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -{ - "title": "CREATE-CATALOG", - "language": "en" -} ---- - - - -## CREATE-CATALOG - -### Name - -CREATE CATALOG - -### Description - -This statement is used to create an external catalog - -Syntax: - -```sql -CREATE CATALOG [IF NOT EXISTS] catalog_name [comment] - PROPERTIES ("key"="value", ...); -``` - -### Example - -1. Create catalog hive - - ```sql - CREATE CATALOG hive comment 'hive catalog' PROPERTIES ( - 'type'='hms', - 'hive.metastore.uris' = 'thrift://127.0.0.1:7004', - 'dfs.nameservices'='HANN', - 'dfs.ha.namenodes.HANN'='nn1,nn2', - 'dfs.namenode.rpc-address.HANN.nn1'='nn1_host:rpc_port', - 'dfs.namenode.rpc-address.HANN.nn2'='nn2_host:rpc_port', - 'dfs.client.failover.proxy.provider.HANN'='org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider' - ); - ``` - -2. Create catalog es - - ```sql - CREATE CATALOG es PROPERTIES ( - "type"="es", - "hosts"="http://127.0.0.1:9200" - ); - ``` - -3. 
Create catalog jdbc - - **mysql** - - ```sql - CREATE CATALOG jdbc PROPERTIES ( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://127.0.0.1:3316/doris_test?useSSL=false", - "driver_url" = "https://doris-community-test-1308700295.cos.ap-hongkong.myqcloud.com/jdbc_driver/mysql-connector-java-8.0.25.jar", - "driver_class" = "com.mysql.cj.jdbc.Driver" - ); - ``` - - **postgresql** - - ```sql - CREATE CATALOG jdbc PROPERTIES ( - "type"="jdbc", - "user"="postgres", - "password"="123456", - "jdbc_url" = "jdbc:postgresql://127.0.0.1:5432/demo", - "driver_url" = "file:///path/to/postgresql-42.5.1.jar", - "driver_class" = "org.postgresql.Driver" - ); - ``` - - **clickhouse** - - ```sql - CREATE CATALOG jdbc PROPERTIES ( - "type"="jdbc", - "user"="default", - "password"="123456", - "jdbc_url" = "jdbc:clickhouse://127.0.0.1:8123/demo", - "driver_url" = "file:///path/to/clickhouse-jdbc-0.3.2-patch11-all.jar", - "driver_class" = "com.clickhouse.jdbc.ClickHouseDriver" - ) - ``` - - **oracle** - ```sql - CREATE CATALOG jdbc PROPERTIES ( - "type"="jdbc", - "user"="doris", - "password"="123456", - "jdbc_url" = "jdbc:oracle:thin:@127.0.0.1:1521:helowin", - "driver_url" = "file:///path/to/ojdbc8.jar", - "driver_class" = "oracle.jdbc.driver.OracleDriver" - ); - ``` - - **SQLServer** - ```sql - CREATE CATALOG sqlserver_catalog PROPERTIES ( - "type"="jdbc", - "user"="SA", - "password"="Doris123456", - "jdbc_url" = "jdbc:sqlserver://localhost:1433;DataBaseName=doris_test", - "driver_url" = "file:///path/to/mssql-jdbc-11.2.3.jre8.jar", - "driver_class" = "com.microsoft.sqlserver.jdbc.SQLServerDriver" - ); - ``` - - **SAP HANA** - ```sql - CREATE CATALOG saphana_catalog PROPERTIES ( - "type"="jdbc", - "user"="SYSTEM", - "password"="SAPHANA", - "jdbc_url" = "jdbc:sap://localhost:31515/TEST", - "driver_url" = "file:///path/to/ngdbc.jar", - "driver_class" = "com.sap.db.jdbc.Driver" - ); - ``` - - **Trino** - ```sql - CREATE CATALOG trino_catalog PROPERTIES ( 
- "type"="jdbc", - "user"="hadoop", - "password"="", - "jdbc_url" = "jdbc:trino://localhost:8080/hive", - "driver_url" = "file:///path/to/trino-jdbc-389.jar", - "driver_class" = "io.trino.jdbc.TrinoDriver" - ); - ``` - - **OceanBase** - ```sql - CREATE CATALOG oceanbase_catalog PROPERTIES ( - "type"="jdbc", - "user"="root", - "password"="", - "jdbc_url" = "jdbc:oceanbase://localhost:2881/demo", - "driver_url" = "file:///path/to/oceanbase-client-2.4.2.jar", - "driver_class" = "com.oceanbase.jdbc.Driver" - ); - ``` - -### Keywords - -CREATE, CATALOG - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-DATABASE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-DATABASE.md deleted file mode 100644 index 49b7cf0537b9bc..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-DATABASE.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -{ - "title": "CREATE-DATABASE", - "language": "en" -} ---- - - - -## CREATE-DATABASE - -### Name - -CREATE DATABASE - -### Description - -This statement is used to create a new database (database) - -grammar: - -```sql -CREATE DATABASE [IF NOT EXISTS] db_name - [PROPERTIES ("key"="value", ...)]; -```` - -`PROPERTIES` Additional information about the database, which can be defaulted. - -- If you want to specify the default replica distribution for tables in db, you need to specify `replication_allocation` (the `replication_allocation` attribute of table will have higher priority than db) - - ```sql - PROPERTIES ( - "replication_allocation" = "tag.location.default:3" - ) - ``` - -### Example - -1. Create a new database db_test - - ```sql - CREATE DATABASE db_test; - ```` - -2. 
Create a new database with default replica distribution: - - ```sql - CREATE DATABASE `iceberg_test` - PROPERTIES ( - "replication_allocation" = "tag.location.group_1:3" - ); - ```` - -### Keywords - -````text -CREATE, DATABASE -```` - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-ENCRYPT-KEY.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-ENCRYPT-KEY.md deleted file mode 100644 index 604e1799085735..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-ENCRYPT-KEY.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -{ - "title": "CREATE-ENCRYPT-KEY", - "language": "en" -} ---- - - - -## CREATE-ENCRYPT-KEY - -### Name - -CREATE ENCRYPTKEY - -### Description - -This statement creates a custom key. Executing this command requires the user to have `ADMIN` privileges. - -grammar: - -```sql -CREATE ENCRYPTKEY key_name AS "key_string" -```` - -illustrate: - -`key_name`: The name of the key to be created, may contain the name of the database. For example: `db1.my_key`. - -`key_string`: The string to create the key with. - -If `key_name` contains the database name, then the custom key will be created in the corresponding database, otherwise this function will create the database in the current session. The name of the new key cannot be the same as the existing key in the corresponding database, otherwise the creation will fail. - -### Example - -1. Create a custom key - - ```sql - CREATE ENCRYPTKEY my_key AS "ABCD123456789"; - ```` - -2. Use a custom key - - To use a custom key, you need to add the keyword `KEY`/`key` before the key, separated from the `key_name` space. 
- - ```sql - mysql> SELECT HEX(AES_ENCRYPT("Doris is Great", KEY my_key)); - +------------------------------------------------+ - | hex(aes_encrypt('Doris is Great', key my_key)) | - +------------------------------------------------+ - | D26DB38579D6A343350EDDC6F2AD47C6 | - +------------------------------------------------+ - 1 row in set (0.02 sec) - - mysql> SELECT AES_DECRYPT(UNHEX('D26DB38579D6A343350EDDC6F2AD47C6'), KEY my_key); - +------------------------------------------------- -------------------+ - | aes_decrypt(unhex('D26DB38579D6A343350EDDC6F2AD47C6'), key my_key) | - +------------------------------------------------- -------------------+ - | Doris is Great | - +------------------------------------------------- -------------------+ - 1 row in set (0.01 sec) - ```` - -### Keywords - - CREATE, ENCRYPTKEY - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-EXTERNAL-TABLE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-EXTERNAL-TABLE.md deleted file mode 100644 index 80417979639d39..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-EXTERNAL-TABLE.md +++ /dev/null @@ -1,274 +0,0 @@ ---- -{ - "title": "CREATE-EXTERNAL-TABLE", - "language": "en" -} ---- - - - -## CREATE-EXTERNAL-TABLE - -### Name - -CREATE EXTERNAL TABLE - -### Description - -This statement is used to create an external table, see [CREATE TABLE](./CREATE-TABLE.md) for the specific syntax. - -Which type of external table is mainly identified by the ENGINE type, currently MYSQL, BROKER, HIVE, ICEBERG, HUDI are optional - -1. 
If it is mysql, you need to provide the following information in properties: - - ```sql - PROPERTIES ( - "host" = "mysql_server_host", - "port" = "mysql_server_port", - "user" = "your_user_name", - "password" = "your_password", - "database" = "database_name", - "table" = "table_name" - ) - ```` - and there is an optional propertiy "charset" which can set character fom mysql connection, default value is "utf8". You can set another value "utf8mb4" instead of "utf8" when you need. - - Notice: - - - "table_name" in "table" entry is the real table name in mysql. The table_name in the CREATE TABLE statement is the name of the mysql table in Doris, which can be different. - - - The purpose of creating a mysql table in Doris is to access the mysql database through Doris. Doris itself does not maintain or store any mysql data. - -2. If it is a broker, it means that the access to the table needs to pass through the specified broker, and the following information needs to be provided in properties: - - ```sql - PROPERTIES ( - "broker_name" = "broker_name", - "path" = "file_path1[,file_path2]", - "column_separator" = "value_separator" - "line_delimiter" = "value_delimiter" - ) - ```` - - In addition, you need to provide the Property information required by the Broker, and pass it through the BROKER PROPERTIES, for example, HDFS needs to pass in - - ```sql - BROKER PROPERTIES( - "username" = "name", - "password" = "password" - ) - ```` - - According to different Broker types, the content that needs to be passed in is also different. - - Notice: - - - If there are multiple files in "path", separate them with comma [,]. If the filename contains a comma, use %2c instead. If the filename contains %, use %25 instead - - Now the file content format supports CSV, and supports GZ, BZ2, LZ4, LZO (LZOP) compression formats. - -3. 
If it is hive, you need to provide the following information in properties: - - ```sql - PROPERTIES ( - "database" = "hive_db_name", - "table" = "hive_table_name", - "hive.metastore.uris" = "thrift://127.0.0.1:9083" - ) - ```` - - Where database is the name of the library corresponding to the hive table, table is the name of the hive table, and hive.metastore.uris is the address of the hive metastore service. - -4. In case of iceberg, you need to provide the following information in properties: - - ```sql - PROPERTIES ( - "iceberg.database" = "iceberg_db_name", - "iceberg.table" = "iceberg_table_name", - "iceberg.hive.metastore.uris" = "thrift://127.0.0.1:9083", - "iceberg.catalog.type" = "HIVE_CATALOG" - ) - ```` - - Where database is the library name corresponding to Iceberg; - table is the corresponding table name in Iceberg; - hive.metastore.uris is the hive metastore service address; - catalog.type defaults to HIVE_CATALOG. Currently only HIVE_CATALOG is supported, more Iceberg catalog types will be supported in the future. - -5. In case of hudi, you need to provide the following information in properties: - - ```sql - PROPERTIES ( - "hudi.database" = "hudi_db_in_hive_metastore", - "hudi.table" = "hudi_table_in_hive_metastore", - "hudi.hive.metastore.uris" = "thrift://127.0.0.1:9083" - ) - ```` - - Where hudi.database is the corresponding database name in HiveMetaStore; - hudi.table is the corresponding table name in HiveMetaStore; - hive.metastore.uris is the hive metastore service address; - -### Example - -1. 
Create a MYSQL external table - - Create mysql table directly from outer table information - - ```sql - CREATE EXTERNAL TABLE example_db.table_mysql - ( - k1 DATE, - k2 INT, - k3 SMALLINT, - k4 VARCHAR(2048), - k5 DATETIME - ) - ENGINE=mysql - PROPERTIES - ( - "host" = "127.0.0.1", - "port" = "8239", - "user" = "mysql_user", - "password" = "mysql_passwd", - "database" = "mysql_db_test", - "table" = "mysql_table_test", - "charset" = "utf8mb4" - ) - ```` - - Create mysql table through External Catalog Resource - - ```sql - # Create Resource first - CREATE EXTERNAL RESOURCE "mysql_resource" - PROPERTIES - ( - "type" = "odbc_catalog", - "user" = "mysql_user", - "password" = "mysql_passwd", - "host" = "127.0.0.1", - "port" = "8239" - ); - - # Then create mysql external table through Resource - CREATE EXTERNAL TABLE example_db.table_mysql - ( - k1 DATE, - k2 INT, - k3 SMALLINT, - k4 VARCHAR(2048), - k5 DATETIME - ) - ENGINE=mysql - PROPERTIES - ( - "odbc_catalog_resource" = "mysql_resource", - "database" = "mysql_db_test", - "table" = "mysql_table_test" - ) - ```` - -2. Create a broker external table with data files stored on HDFS, the data is split with "|", and "\n" is newline - - ```sql - CREATE EXTERNAL TABLE example_db.table_broker ( - k1 DATE, - k2 INT, - k3 SMALLINT, - k4 VARCHAR(2048), - k5 DATETIME - ) - ENGINE=broker - PROPERTIES ( - "broker_name" = "hdfs", - "path" = "hdfs://hdfs_host:hdfs_port/data1,hdfs://hdfs_host:hdfs_port/data2,hdfs://hdfs_host:hdfs_port/data3%2c4", - "column_separator" = "|", - "line_delimiter" = "\n" - ) - BROKER PROPERTIES ( - "username" = "hdfs_user", - "password" = "hdfs_password" - ) - ```` - -3. Create a hive external table - - ```sql - CREATE TABLE example_db.table_hive - ( - k1 TINYINT, - k2 VARCHAR(50), - v INT - ) - ENGINE=hive - PROPERTIES - ( - "database" = "hive_db_name", - "table" = "hive_table_name", - "hive.metastore.uris" = "thrift://127.0.0.1:9083" - ); - ```` - -4. 
Create an Iceberg skin - - ```sql - CREATE TABLE example_db.t_iceberg - ENGINE=ICEBERG - PROPERTIES ( - "iceberg.database" = "iceberg_db", - "iceberg.table" = "iceberg_table", - "iceberg.hive.metastore.uris" = "thrift://127.0.0.1:9083", - "iceberg.catalog.type" = "HIVE_CATALOG" - ); - ```` - -5. Create an Hudi external table - - create hudi table without schema(recommend) - ```sql - CREATE TABLE example_db.t_hudi - ENGINE=HUDI - PROPERTIES ( - "hudi.database" = "hudi_db_in_hive_metastore", - "hudi.table" = "hudi_table_in_hive_metastore", - "hudi.hive.metastore.uris" = "thrift://127.0.0.1:9083" - ); - ```` - - create hudi table with schema - ```sql - CREATE TABLE example_db.t_hudi ( - `id` int NOT NULL COMMENT "id number", - `name` varchar(10) NOT NULL COMMENT "user name" - ) - ENGINE=HUDI - PROPERTIES ( - "hudi.database" = "hudi_db_in_hive_metastore", - "hudi.table" = "hudi_table_in_hive_metastore", - "hudi.hive.metastore.uris" = "thrift://127.0.0.1:9083" - ); - ```` - -### Keywords - - CREATE, EXTERNAL, TABLE - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-FILE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-FILE.md deleted file mode 100644 index b8e79e750e88db..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-FILE.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -{ - "title": "CREATE-FILE", - "language": "en" -} ---- - - - -## CREATE-FILE - -### Name - -CREATE FILE - -### Description - -This statement is used to create and upload a file to the Doris cluster. -This function is usually used to manage files that need to be used in some other commands, such as certificates, public and private keys, and so on. - -This command can only be executed by users with `admin` privileges. -A certain file belongs to a certain database. This file can be used by any user with access rights to database. 
- -A single file size is limited to 1MB. -A Doris cluster can upload up to 100 files. - -grammar: - -```sql -CREATE FILE "file_name" [IN database] -PROPERTIES("key"="value", ...) -```` - -illustrate: - -- file_name: custom file name. -- database: The file belongs to a certain db, if not specified, the db of the current session is used. -- properties supports the following parameters: - - url: Required. Specifies the download path for a file. Currently only unauthenticated http download paths are supported. After the command executes successfully, the file will be saved in doris and the url will no longer be needed. - - catalog: Required. The classification name of the file can be customized. However, in some commands, files in the specified catalog are looked up. For example, in the routine import, when the data source is kafka, the file under the catalog name kafka will be searched. - - md5: optional. md5 of the file. If specified, verification will be performed after the file is downloaded. - -### Example - -1. Create a file ca.pem , classified as kafka - - ```sql - CREATE FILE "ca.pem" - PROPERTIES - ( - "url" = "https://test.bj.bcebos.com/kafka-key/ca.pem", - "catalog" = "kafka" - ); - ```` - -2. Create a file client.key, classified as my_catalog - - ```sql - CREATE FILE "client.key" - IN my_database - PROPERTIES - ( - "url" = "https://test.bj.bcebos.com/kafka-key/client.key", - "catalog" = "my_catalog", - "md5" = "b5bb901bf10f99205b39a46ac3557dd9" - ); - ```` - -### Keywords - -````text -CREATE, FILE -```` - -### Best Practice - -1. This command can only be executed by users with amdin privileges. A certain file belongs to a certain database. This file can be used by any user with access rights to database. - -2. File size and quantity restrictions. - - This function is mainly used to manage some small files such as certificates. So a single file size is limited to 1MB. A Doris cluster can upload up to 100 files. 
- diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-FUNCTION.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-FUNCTION.md deleted file mode 100644 index e619870d6543a5..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-FUNCTION.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -{ - "title": "CREATE-FUNCTION", - "language": "en" -} ---- - - - -## CREATE-FUNCTION - -### Name - -CREATE FUNCTION - -### Description - -This statement creates a custom function. Executing this command requires the user to have `ADMIN` privileges. - -If `function_name` contains the database name, then the custom function will be created in the corresponding database, otherwise the function will be created in the database where the current session is located. The name and parameters of the new function cannot be the same as the existing functions in the current namespace, otherwise the creation will fail. But only with the same name and different parameters can be created successfully. - -grammar: - -```sql -CREATE [GLOBAL] [AGGREGATE] [ALIAS] FUNCTION function_name - (arg_type [, ...]) - [RETURNS ret_type] - [INTERMEDIATE inter_type] - [WITH PARAMETER(param [,...]) AS origin_function] - [PROPERTIES ("key" = "value" [, ...]) ] -```` - -Parameter Description: - -- `GLOBAL`: If there is this item, it means that the created function is a global function. - -- `AGGREGATE`: If there is this item, it means that the created function is an aggregate function. - - -- `ALIAS`: If there is this item, it means that the created function is an alias function. - - - If the above two items are absent, it means that the created function is a scalar function - -- `function_name`: The name of the function to be created, which can include the name of the database. For example: `db1.my_func`. 
- - -- `arg_type`: The parameter type of the function, which is the same as the type defined when creating the table. Variable-length parameters can be represented by `, ...`. If it is a variable-length type, the type of the variable-length parameter is the same as that of the last non-variable-length parameter. - - **NOTE**: `ALIAS FUNCTION` does not support variable-length arguments and must have at least one argument. - -- `ret_type`: Required for creating new functions. If you are aliasing an existing function, you do not need to fill in this parameter. - - -- `inter_type`: The data type used to represent the intermediate stage of the aggregation function. - - -- `param`: used to represent the parameter of the alias function, including at least one. - - -- `origin_function`: used to represent the original function corresponding to the alias function. - - -- `properties`: Used to set function-related properties, the properties that can be set include: - - - `file`: Indicates the jar package containing the user UDF. In a multi-machine environment, you can also use http to download the jar package. This parameter is mandatory. - - - `symbol`: Indicates the name of the class containing the UDF class. This parameter must be set - - - `type`: Indicates the UDF call type, the default is Native, and JAVA_UDF is passed when using Java UDF. - - - `always_nullable`: Indicates whether NULL values may appear in the UDF return result, is an optional parameter, and the default value is true. - - -### Example - -1. Create a custom UDF function - - ```sql - CREATE FUNCTION java_udf_add_one(int) RETURNS int PROPERTIES ( - "file"="file:///path/to/java-udf-demo-jar-with-dependencies.jar", - "symbol"="org.apache.doris.udf.AddOne", - "always_nullable"="true", - "type"="JAVA_UDF" - ); - ``` - - -2. 
Create a custom UDAF function - - ```sql - CREATE AGGREGATE FUNCTION simple_sum(INT) RETURNS INT PROPERTIES ( - "file"="file:///pathTo/java-udaf.jar", - "symbol"="org.apache.doris.udf.demo.SimpleDemo", - "always_nullable"="true", - "type"="JAVA_UDF" - ); - ``` - -3. Create a custom alias function - - ```sql - CREATE ALIAS FUNCTION id_masking(INT) WITH PARAMETER(id) AS CONCAT(LEFT(id, 3), '****', RIGHT(id, 4)); - ``` - -4. Create a global custom alias function - - ```sql - CREATE GLOBAL ALIAS FUNCTION id_masking(INT) WITH PARAMETER(id) AS CONCAT(LEFT(id, 3), '****', RIGHT(id, 4)); - ``` - -### Keywords - - CREATE, FUNCTION - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-INDEX.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-INDEX.md deleted file mode 100644 index 016034b52e0264..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-INDEX.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "CREATE-INDEX", - "language": "en" -} ---- - - - -## CREATE-INDEX - -### Name - -CREATE INDEX - -### Description - -This statement is used to create an index -grammar: - -```sql -CREATE INDEX [IF NOT EXISTS] index_name ON table_name (column [, ...],) [USING INVERTED] [COMMENT 'balabala']; -```` -Notice: -- INVERTED indexes are only created on a single column - -### Example - -1. 
Create a inverted index for siteid on table1 - - ```sql - CREATE INDEX [IF NOT EXISTS] index_name ON table1 (siteid) USING INVERTED COMMENT 'balabala'; - ```` - - -### Keywords - -````text -CREATE, INDEX -```` - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-JOB.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-JOB.md deleted file mode 100644 index 9407d031b80310..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-JOB.md +++ /dev/null @@ -1,167 +0,0 @@ ---- -{ -"title": "CREATE-JOB", -"language": "en" -} - ---- - - -## CREATE-JOB - -### Name - -CREATE JOB - -### Description - -Doris Job is a task that runs according to a predefined schedule, triggering predefined actions at specific times or intervals to help automate certain tasks. In terms of functionality, it is similar to scheduled tasks on operating systems (e.g., cron in Linux, scheduled tasks in Windows).↳ - -There are two types of Jobs: ONE_TIME and RECURRING. The ONE_TIME type of Job triggers at a specified time point and is mainly used for one-time tasks, while the RECURRING type of Job triggers at specified time intervals and is used for periodically recurring tasks. The RECURRING type of Job can specify a start time and an end time using STARTS\ENDS. If the start time is not specified, the first execution time is set to the current time plus one scheduling period. If the end time is specified and the task completes execution by reaching the end time (or exceeds it, or the next execution period exceeds the end time), the Job status is updated to FINISHED, and no more Tasks are generated. - -#### Job Status - -A Job has four states (RUNNING, STOPPED, PAUSED, FINISHED), with the initial state being RUNNING. A Job in the RUNNING state generates Tasks based on the specified scheduling period. 
When a Job completes execution and reaches the end time, the status changes to FINISHED. - -A Job in the RUNNING state can be paused, which means it will no longer generate Tasks. - -A Job in the PAUSED state can be resumed by performing the RESUME operation, changing the state to RUNNING. - -A Job in the STOPPED state is triggered by the user, which cancels the running Job and then deletes it. - -A Job in the FINISHED state remains in the system for 24 hours and is deleted after that. - -#### Task status - -A Job only describes the job information, and the execution generates Tasks. The Task status can be PENDING, RUNNING, SUCCESS, FAILED, or CANCELED. - -PENDING indicates that the trigger time has been reached but resources are awaited for running. Once resources are allocated, the status changes to RUNNING. When the execution is successful or fails, the status changes to SUCCESS or FAILED, respectively. - -CANCELED indicates the cancellation status. The final status of a Task is persisted as SUCCESS or FAILED. Other statuses can be queried while the Task is running, but they become invisible after a restart. Only the latest 100 Task records are retained. - -#### Permissions - -Currently, only users with the ADMIN role can perform this operation. - -#### Related Documentation - -[PAUSE-JOB](../Alter/PAUSE-JOB.md),[RESUME-JOB](../Alter/RESUME-JOB.md),[DROP-JOB](../Drop/DROP-JOB.md), [TVF-JOB](../../../sql-functions/table-functions/job.md), -[TVF-TASKS](../../../sql-functions/table-functions/tasks) - -### Grammar - -```sql -CREATE - job - job_name - ON SCHEDULE schedule - [COMMENT 'string'] - DO sql_body; - -schedule: { - AT timestamp - | EVERY interval - [STARTS timestamp] - [ENDS timestamp ] -} - -interval: - quantity { DAY | HOUR | MINUTE | - WEEK | SECOND } -``` - -A valid Job statement must contain the following - -- The keyword CREATE JOB plus the job name, which uniquely identifies the event within a database. 
The job name must be globally unique, and if a JOB with the same name already exists, an error will be reported. We reserve the inner_ prefix for internal use, so users cannot create names starting with ***inner_***. -- The ON SCHEDULE clause, which specifies the type of Job and when and how often to trigger it. -- The DO clause, which specifies the actions that need to be performed when the Job is triggered. - -Here is a minimal example: - -```sql -CREATE JOB my_job ON SCHEDULE EVERY 1 MINUTE DO INSERT INTO db1.tbl1 SELECT * FROM db2.tbl2; -``` - -This statement means to create a job named my_job to be executed every minute, and the operation performed is to import the data in db2.tbl2 into db1.tbl1. - -The SCHEDULE statement is used to define the execution time, frequency and duration of the job, which can specify a one-time job or a periodic job. -- AT timestamp - -Format: 'YYYY-MM-DD HH:MM:SS'. Used for one-time events, it specifies that the event should only be executed once at the given date and time. Once the execution is complete, the Job status changes to FINISHED. - -- EVERY - - Indicates that the operation is repeated periodically, which specifies the execution frequency of the job. After the keyword, a time interval should be specified, which can be days, hours, minutes, seconds, and weeks. - - - interval - - Used to specify the Job execution frequency, which can be `day`, `hour`, `minute`, or `week`. For example, 1 `DAY` means the Job will run once every day, 1 `HOUR` means once every hour, 1 `MINUTE` means once every minute, and `1 The CREATE JOB statement is used to create a job in a database. A job is a task that can be scheduled to run at specific times or intervals to automate certain actions. - - - STARTS timestamp(optional) - - Format: 'YYYY-MM-DD HH:MM:SS'. It is used to specify the start time of the job. If not specified, the job starts executing from the next occurrence based on the current time. 
The start time must be greater than the current time. - - - ENDS timestamp(optional) - - Format: 'YYYY-MM-DD HH:MM:SS'. It is used to specify the end time of the job. If not specified, it means the job executes indefinitely. The end date must be greater than the current time. If a start time (↳STARTS) is specified, the end time must be greater than the start time. - -- DO - - It is used to specify the operation that needs to be performed when the job is triggered. Currently, all ***INSERT*** operations are supported. We will support more operations in the future. - -### Example - -Create a one-time job, which will be executed once at 2020-01-01 00:00:00, and the operation performed is to import the data in db2.tbl2 into db1.tbl1. - -```sql - -CREATE JOB my_job ON SCHEDULE AT '2020-01-01 00:00:00' DO INSERT INTO db1.tbl1 SELECT * FROM db2.tbl2; - -``` - -Create a periodic Job, which will start to execute at 2020-01-01 00:00:00, once a day, and the operation is to import the data in db2.tbl2 into db1.tbl1. - -```sql -CREATE JOB my_job ON SCHEDULE EVERY 1 DAY STARTS '2020-01-01 00:00:00' DO INSERT INTO db1.tbl1 SELECT * FROM db2.tbl2 WHERE create_time >= days_add(now(),-1); -``` - -Create a periodic Job, which will start to execute at 2020-01-01 00:00:00, and execute once a day. The operation performed is to import the data in db2.tbl2 into db1.tbl1. This Job will be executed in 2020 Ends at -01-01 00:10:00. - -```sql -CREATE JOB my_job ON SCHEDULE EVERY 1 DAY STARTS '2020-01-01 00:00:00' ENDS '2020-01-01 00:10:00' DO INSERT INTO db1.tbl1 SELECT * FROM db2.tbl2 create_time >= days_add (now(),-1); -``` - -### CONFIG - -#### fe.conf - -- job_dispatch_timer_job_thread_num: Number of threads used for dispatching scheduled tasks. Default value is 2. If there are a large number of periodically executed tasks, this parameter can be increased. -- job_dispatch_timer_job_queue_size: Size of the queue used for storing scheduled tasks when there is task accumulation. 
Default value is 1024. If there are a large number of tasks triggered at the same time, this parameter can be increased. Otherwise, the queue may become full and submitting tasks will be blocked, causing subsequent tasks to be unable to submit. -- finished_job_cleanup_threshold_time_hour: Time threshold, in hours, for cleaning up completed tasks. Default value is 24 hours. -- job_insert_task_consumer_thread_num: Number of threads used for executing Insert tasks. The value should be greater than 0, otherwise the default value is 5. - -### Best Practice - -- Properly manage Jobs to avoid triggering a large number of Jobs simultaneously, which can lead to task accumulation and affect the normal operation of the system. -- Set the execution interval of tasks within a reasonable range, ensuring that it is at least greater than the task execution time. - -### Keywords - - CREATE, JOB, SCHEDULE \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-MATERIALIZED-VIEW.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-MATERIALIZED-VIEW.md deleted file mode 100644 index b83632104e3ebd..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-MATERIALIZED-VIEW.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -{ - "title": "CREATE-MATERIALIZED-VIEW", - "language": "en" -} ---- - - - -## CREATE-MATERIALIZED-VIEW - -### Name - -CREATE MATERIALIZED VIEW - -### Description - -This statement is used to create a materialized view. - -This operation is an asynchronous operation. After the submission is successful, you need to view the job progress through [SHOW ALTER TABLE MATERIALIZED VIEW](../../Show-Statements/SHOW-ALTER-TABLE-MATERIALIZED-VIEW.md). After displaying FINISHED, you can use the `desc [table_name] all` command to view the schema of the materialized view. 
- -grammar: - -```sql -CREATE MATERIALIZED VIEW < MV name > as < query > -[PROPERTIES ("key" = "value")] -```` - -illustrate: - -- `MV name`: The name of the materialized view, required. Materialized view names for the same table cannot be repeated. - -- `query`: The query statement used to construct the materialized view, the result of the query statement is the data of the materialized view. Currently supported query formats are: - - ```sql - SELECT select_expr[, select_expr ...] - FROM [Base view name] - GROUP BY column_name[, column_name ...] - ORDER BY column_name[, column_name ...] - ```` - - The syntax is the same as the query syntax. - - - `select_expr`: All columns in the schema of the materialized view. - - Contains at least one single column. - - `base view name`: The original table name of the materialized view, required. - - Must be a single table and not a subquery - - `group by`: The grouping column of the materialized view, optional. - - If not filled, the data will not be grouped. - - `order by`: the sorting column of the materialized view, optional. - - The declaration order of the sort column must be the same as the column declaration order in select_expr. - - If order by is not declared, the sorting column is automatically supplemented according to the rules. If the materialized view is an aggregate type, all grouping columns are automatically supplemented as sort columns. If the materialized view is of a non-aggregate type, the first 36 bytes are automatically supplemented as the sort column. - - If the number of auto-supplemented sorts is less than 3, the first three are used as the sort sequence. If query contains a grouping column, the sorting column must be the same as the grouping column. - -- properties - - Declare some configuration of the materialized view, optional. - - ````text - PROPERTIES ("key" = "value", "key" = "value" ...) 
- ```` - - The following configurations can be declared here: - - ````text - short_key: The number of sorting columns. - timeout: The timeout for materialized view construction. - ```` - -### Example - -Base table structure is - -```sql -mysql> desc duplicate_table; -+-------+--------+------+------+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-------+--------+------+------+---------+-------+ -| k1 | INT | Yes | true | N/A | | -| k2 | INT | Yes | true | N/A | | -| k3 | BIGINT | Yes | true | N/A | | -| k4 | BIGINT | Yes | true | N/A | | -+-------+--------+------+------+---------+-------+ -```` -```sql -create table duplicate_table( - k1 int null, - k2 int null, - k3 bigint null, - k4 bigint null -) -duplicate key (k1,k2,k3,k4) -distributed BY hash(k4) buckets 3 -properties("replication_num" = "1"); -``` -attention:If the materialized view contains partitioned and distributed columns of the Base table, these columns must be used as key columns in the materialized view - -1. Create a materialized view that contains only the columns of the original table (k1, k2) - - ```sql - create materialized view k2_k1 as - select k2, k1 from duplicate_table; - ```` - - The schema of the materialized view is as follows, the materialized view contains only two columns k1, k2 without any aggregation - - ````text - +-------+-------+--------+------+------+ ---------+-------+ - | IndexName | Field | Type | Null | Key | Default | Extra | - +-------+-------+--------+------+------+ ---------+-------+ - | k2_k1 | k2 | INT | Yes | true | N/A | | - | | k1 | INT | Yes | true | N/A | | - +-------+-------+--------+------+------+ ---------+-------+ - ```` - -2. Create a materialized view with k2 as the sort column - - ```sql - create materialized view k2_order as - select k2, k1 from duplicate_table order by k2; - ```` - - The schema of the materialized view is shown in the figure below. 
The materialized view contains only two columns k2, k1, where k2 is the sorting column without any aggregation. - - ````text - +-------+-------+--------+------+------- +---------+-------+ - | IndexName | Field | Type | Null | Key | Default | Extra | - +-------+-------+--------+------+------- +---------+-------+ - | k2_order | k2 | INT | Yes | true | N/A | | - | | k1 | INT | Yes | false | N/A | NONE | - +-------+-------+--------+------+------- +---------+-------+ - ```` - -3. Create a materialized view with k1, k2 grouping and k3 column aggregated by SUM - - ```sql - create materialized view k1_k2_sumk3 as - select k1, k2, sum(k3) from duplicate_table group by k1, k2; - ```` - - The schema of the materialized view is shown in the figure below. The materialized view contains two columns k1, k2, sum(k3) where k1, k2 are the grouping columns, and sum(k3) is the sum value of the k3 column grouped by k1, k2. - - Since the materialized view does not declare a sorting column, and the materialized view has aggregated data, the system defaults to supplement the grouping columns k1 and k2 as sorting columns. - - ````text - +-------+-------+--------+------+------- +---------+-------+ - | IndexName | Field | Type | Null | Key | Default | Extra | - +-------+-------+--------+------+------- +---------+-------+ - | k1_k2_sumk3 | k1 | INT | Yes | true | N/A | | - | | k2 | INT | Yes | true | N/A | | - | | k3 | BIGINT | Yes | false | N/A | SUM | - +-------+-------+--------+------+------- +---------+-------+ - ```` - -4. Create a materialized view that removes duplicate rows - - ```sql - create materialized view deduplicate as - select k1, k2, k3, k4 from duplicate_table group by k1, k2, k3, k4; - ```` - - The materialized view schema is as shown below. The materialized view contains columns k1, k2, k3, and k4, and there are no duplicate rows. 
- - ````text - +-------+-------+--------+------+------- +---------+-------+ - | IndexName | Field | Type | Null | Key | Default | Extra | - +-------+-------+--------+------+------- +---------+-------+ - | deduplicate | k1 | INT | Yes | true | N/A | | - | | k2 | INT | Yes | true | N/A | | - | | k3 | BIGINT | Yes | true | N/A | | - | | k4 | BIGINT | Yes | true | N/A | | - +-------+-------+--------+------+------- +---------+-------+ - ```` - -5. Create a non-aggregate materialized view that does not declare a sort column - - The schema of all_type_table is as follows - - ```` - +-------+--------------+------+-------+---------+- ------+ - | Field | Type | Null | Key | Default | Extra | - +-------+--------------+------+-------+---------+- ------+ - | k1 | TINYINT | Yes | true | N/A | | - | k2 | SMALLINT | Yes | true | N/A | | - | k3 | INT | Yes | true | N/A | | - | k4 | BIGINT | Yes | true | N/A | | - | k5 | DECIMAL(9,0) | Yes | true | N/A | | - | k6 | DOUBLE | Yes | false | N/A | NONE | - | k7 | VARCHAR(20) | Yes | false | N/A | NONE | - +-------+--------------+------+-------+---------+- ------+ - ```` - - The materialized view contains k3, k4, k5, k6, k7 columns, and does not declare a sort column, the creation statement is as follows: - - ```sql - create materialized view mv_1 as - select k3, k4, k5, k6, k7 from all_type_table; - ```` - - The default added sorting column of the system is k3, k4, k5 three columns. The sum of the bytes of these three column types is 4(INT) + 8(BIGINT) + 16(DECIMAL) = 28 < 36. So the addition is that these three columns are used as sorting columns. The schema of the materialized view is as follows, you can see that the key field of the k3, k4, k5 columns is true, that is, the sorting column. The key field of the k6, k7 columns is false, which is a non-sorted column. 
- - ```sql - +----------------+-------+--------------+------+-- -----+---------+-------+ - | IndexName | Field | Type | Null | Key | Default | Extra | - +----------------+-------+--------------+------+-- -----+---------+-------+ - | mv_1 | k3 | INT | Yes | true | N/A | | - | | k4 | BIGINT | Yes | true | N/A | | - | | k5 | DECIMAL(9,0) | Yes | true | N/A | | - | | k6 | DOUBLE | Yes | false | N/A | NONE | - | | k7 | VARCHAR(20) | Yes | false | N/A | NONE | - +----------------+-------+--------------+------+-- -----+---------+-------+ - ```` - -### Keywords - - CREATE, MATERIALIZED, VIEW - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-POLICY.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-POLICY.md deleted file mode 100644 index 86f3dc6f767570..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-POLICY.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -{ - "title": "CREATE-POLICY", - "language": "en" -} ---- - - - -## CREATE-POLICY - -### Name - -CREATE POLICY - -### Description - -Create policies,such as: -1. Create security policies(ROW POLICY) and explain to view the rewritten SQL. -2. Create storage migration policy(STORAGE POLICY), used for cold and hot data transform - -#### Grammar: - -1. ROW POLICY -```sql -CREATE ROW POLICY test_row_policy_1 ON test.table1 -AS {RESTRICTIVE|PERMISSIVE} TO test USING (id in (1, 2)); -``` - -illustrate: - -- filterType:It is usual to constrict a set of policies through AND. PERMISSIVE to constrict a set of policies through OR -- Configure multiple policies. First, merge the RESTRICTIVE policy with the PERMISSIVE policy -- It is connected with AND between RESTRICTIVE AND PERMISSIVE -- It cannot be created for users root and admin - -2. STORAGE POLICY -```sql -CREATE STORAGE POLICY test_storage_policy_1 -PROPERTIES ("key"="value", ...); -``` -illustrate: -- PROPERTIES has such keys: - 1. 
storage_resource:storage resource name for policy - 2. cooldown_datetime:cool down time for tablet, can't be set with cooldown_ttl. - 3. cooldown_ttl:hot data stay time. The time cost between the time of tablet created and - the time of migrated to cold data, formatted as: - 1d:1 day - 1h:1 hour - 50000: 50000 second - -### Example - -1. Create a set of row security policies - - ```sql - CREATE ROW POLICY test_row_policy_1 ON test.table1 - AS RESTRICTIVE TO test USING (c1 = 'a'); - ``` - ```sql - CREATE ROW POLICY test_row_policy_2 ON test.table1 - AS RESTRICTIVE TO test USING (c2 = 'b'); - ``` - ```sql - CREATE ROW POLICY test_row_policy_3 ON test.table1 - AS PERMISSIVE TO test USING (c3 = 'c'); - ``` - ```sql - CREATE ROW POLICY test_row_policy_3 ON test.table1 - AS PERMISSIVE TO test USING (c4 = 'd'); - ``` - - When we execute the query on Table1, the rewritten SQL is - - ```sql - select * from (select * from table1 where c1 = 'a' and c2 = 'b' or c3 = 'c' or c4 = 'd') - ``` - -2. Create policy for storage - 1. NOTE - - To create a cold hot separation policy, you must first create a resource, and then associate the created resource name when creating a migration policy - - Currently, the drop data migration policy is not supported to prevent data from being migrated. If the policy has been deleted, then the system cannot retrieve the data - 2. Create policy on cooldown_datetime - ```sql - CREATE STORAGE POLICY testPolicy - PROPERTIES( - "storage_resource" = "s3", - "cooldown_datetime" = "2022-06-08 00:00:00" - ); - ``` - 3. 
Create policy on cooldown_ttl - ```sql - CREATE STORAGE POLICY testPolicy - PROPERTIES( - "storage_resource" = "s3", - "cooldown_ttl" = "1d" - ); - ``` - Relevant parameters are as follows: - - `storage_resource`: the storage resource of create - - `cooldown_datetime`: Data migration time - - `cooldown_ttl`: Countdown of the distance between the migrated data and the current time - -### Keywords - - CREATE, POLICY - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-RESOURCE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-RESOURCE.md deleted file mode 100644 index 0dff3597175767..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-RESOURCE.md +++ /dev/null @@ -1,249 +0,0 @@ ---- -{ - "title": "CREATE-RESOURCE", - "language": "en" -} ---- - - - -## CREATE-RESOURCE - -### Name - -CREATE RESOURCE - -### Description - -This statement is used to create a resource. Only the root or admin user can create resources. Currently supports Spark, ODBC, S3 external resources. -In the future, other external resources may be added to Doris for use, such as Spark/GPU for query, HDFS/S3 for external storage, MapReduce for ETL, etc. - -grammar: - -```sql -CREATE [EXTERNAL] RESOURCE "resource_name" -PROPERTIES ("key"="value", ...); -```` - -illustrate: - -- The type of resource needs to be specified in PROPERTIES "type" = "[spark|odbc_catalog|s3]", currently supports spark, odbc_catalog, s3. -- PROPERTIES differs depending on the resource type, see the example for details. - -### Example - -1. Create a Spark resource named spark0 in yarn cluster mode. 
- - ```sql - CREATE EXTERNAL RESOURCE "spark0" - PROPERTIES - ( - "type" = "spark", - "spark.master" = "yarn", - "spark.submit.deployMode" = "cluster", - "spark.jars" = "xxx.jar,yyy.jar", - "spark.files" = "/tmp/aaa,/tmp/bbb", - "spark.executor.memory" = "1g", - "spark.yarn.queue" = "queue0", - "spark.hadoop.yarn.resourcemanager.address" = "127.0.0.1:9999", - "spark.hadoop.fs.defaultFS" = "hdfs://127.0.0.1:10000", - "working_dir" = "hdfs://127.0.0.1:10000/tmp/doris", - "broker" = "broker0", - "broker.username" = "user0", - "broker.password" = "password0" - ); - ```` - - Spark related parameters are as follows: - - spark.master: Required, currently supports yarn, spark://host:port. - - spark.submit.deployMode: The deployment mode of the Spark program, required, supports both cluster and client. - - spark.hadoop.yarn.resourcemanager.address: Required when master is yarn. - - spark.hadoop.fs.defaultFS: Required when master is yarn. - - Other parameters are optional, refer to [here](http://spark.apache.org/docs/latest/configuration.html) - - - - Working_dir and broker need to be specified when Spark is used for ETL. described as follows: - - - working_dir: The directory used by the ETL. Required when spark is used as an ETL resource. For example: hdfs://host:port/tmp/doris. - - broker: broker name. Required when spark is used as an ETL resource. Configuration needs to be done in advance using the `ALTER SYSTEM ADD BROKER` command. - - broker.property_key: The authentication information that the broker needs to specify when reading the intermediate file generated by ETL. - -2. 
Create an ODBC resource - - ```sql - CREATE EXTERNAL RESOURCE `oracle_odbc` - PROPERTIES ( - "type" = "odbc_catalog", - "host" = "192.168.0.1", - "port" = "8086", - "user" = "test", - "password" = "test", - "database" = "test", - "odbc_type" = "oracle", - "driver" = "Oracle 19 ODBC driver" - ); - ```` - - The relevant parameters of ODBC are as follows: - - hosts: IP address of the external database - - driver: The driver name of the ODBC appearance, which must be the same as the Driver name in be/conf/odbcinst.ini. - - odbc_type: the type of the external database, currently supports oracle, mysql, postgresql - - user: username of the foreign database - - password: the password information of the corresponding user - - charset: connection charset - - There is also support for implementing custom parameters per ODBC Driver, see the description of the corresponding ODBC Driver - -3. Create S3 resource - - ```sql - CREATE RESOURCE "remote_s3" - PROPERTIES - ( - "type" = "s3", - "s3.endpoint" = "bj.s3.com", - "s3.region" = "bj", - "s3.access_key" = "bbb", - "s3.secret_key" = "aaaa", - -- the followings are optional - "s3.connection.maximum" = "50", - "s3.connection.request.timeout" = "3000", - "s3.connection.timeout" = "1000" - ); - ``` - - If S3 resource is used for [cold hot separation](../../../../../docs/advanced/cold_hot_separation.md), we should add more required fields. 
- ```sql - CREATE RESOURCE "remote_s3" - PROPERTIES - ( - "type" = "s3", - "s3.endpoint" = "bj.s3.com", - "s3.region" = "bj", - "s3.access_key" = "bbb", - "s3.secret_key" = "aaaa", - -- required by cooldown - "s3.root.path" = "/path/to/root", - "s3.bucket" = "test-bucket" - ); - ``` - - S3 related parameters are as follows: - - Required parameters - - `s3.endpoint`: s3 endpoint - - `s3.region`:s3 region - - `s3.root.path`: s3 root directory - - `s3.access_key`: s3 access key - - `s3.secret_key`: s3 secret key - - `s3.bucket`:s3 bucket - - optional parameter - - `s3.connection.maximum`: the maximum number of s3 connections, the default is 50 - - `s3.connection.request.timeout`: s3 request timeout, in milliseconds, the default is 3000 - - `s3.connection.timeout`: s3 connection timeout, in milliseconds, the default is 1000 - -4. Create JDBC resource - - ```sql - CREATE RESOURCE mysql_resource PROPERTIES ( - "type"="jdbc", - "user"="root", - "password"="123456", - "jdbc_url" = "jdbc:mysql://127.0.0.1:3316/doris_test?useSSL=false", - "driver_url" = "https://doris-community-test-1308700295.cos.ap-hongkong.myqcloud.com/jdbc_driver/mysql-connector-java-8.0.25.jar", - "driver_class" = "com.mysql.cj.jdbc.Driver" - ); - ``` - - JDBC related parameters are as follows: - - user:The username used to connect to the database - - password:The password used to connect to the database - - jdbc_url: The identifier used to connect to the specified database - - driver_url: The url of JDBC driver package - - driver_class: The class of JDBC driver - -5. 
Create HDFS resource - - ```sql - CREATE RESOURCE hdfs_resource PROPERTIES ( - "type"="hdfs", - "username"="user", - "password"="passwd", - "dfs.nameservices" = "my_ha", - "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2", - "dfs.namenode.rpc-address.my_ha.my_namenode1" = "nn1_host:rpc_port", - "dfs.namenode.rpc-address.my_ha.my_namenode2" = "nn2_host:rpc_port", - "dfs.client.failover.proxy.provider" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" - ); - ``` - - HDFS related parameters are as follows: - - fs.defaultFS: namenode address and port - - username: hdfs username - - dfs.nameservices: if hadoop enable HA, please set fs nameservice. See hdfs-site.xml - - dfs.ha.namenodes.[nameservice ID]:unique identifiers for each NameNode in the nameservice. See hdfs-site.xml - - dfs.namenode.rpc-address.[nameservice ID].[name node ID]`:the fully-qualified RPC address for each NameNode to listen on. See hdfs-site.xml - - dfs.client.failover.proxy.provider.[nameservice ID]:the Java class that HDFS clients use to contact the Active NameNode, usually it is org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider - -6. Create HMS resource - - HMS resource is used to create [hms catalog](../../../../lakehouse/multi-catalog/multi-catalog.md) - ```sql - CREATE RESOURCE hms_resource PROPERTIES ( - 'type'='hms', - 'hive.metastore.uris' = 'thrift://127.0.0.1:7004', - 'dfs.nameservices'='HANN', - 'dfs.ha.namenodes.HANN'='nn1,nn2', - 'dfs.namenode.rpc-address.HANN.nn1'='nn1_host:rpc_port', - 'dfs.namenode.rpc-address.HANN.nn2'='nn2_host:rpc_port', - 'dfs.client.failover.proxy.provider.HANN'='org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider' - ); - ``` - - HMS related parameters are as follows: - - hive.metastore.uris: hive metastore server address - Optional: - - dfs.*: If hive data is on hdfs, HDFS resource parameters should be added, or copy hive-site.xml into fe/conf. 
- - s3.*: If hive data is on s3, S3 resource parameters should be added. If using [Aliyun Data Lake Formation](https://www.aliyun.com/product/bigdata/dlf), copy hive-site.xml into fe/conf. - -7. Create ES resource - - ```sql - CREATE RESOURCE es_resource PROPERTIES ( - "type"="es", - "hosts"="http://127.0.0.1:29200", - "nodes_discovery"="false", - "enable_keyword_sniff"="true" - ); - ``` - - ES related parameters are as follows: - - hosts: ES Connection Address, maybe one or more node, load-balance is also accepted - - user: username for ES - - password: password for the user - - enable_docvalue_scan: whether to enable ES/Lucene column storage to get the value of the query field, the default is true - - enable_keyword_sniff: Whether to probe the string segmentation type text.fields in ES, query by keyword (the default is true, false matches the content after the segmentation) - - nodes_discovery: Whether or not to enable ES node discovery, the default is true. In network isolation, set this parameter to false. Only the specified node is connected - - http_ssl_enabled: Whether ES cluster enables https access mode, the current FE/BE implementation is to trust all - -### Keywords - - CREATE, RESOURCE - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-SQL-BLOCK-RULE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-SQL-BLOCK-RULE.md deleted file mode 100644 index f40b364ecf174c..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-SQL-BLOCK-RULE.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -{ - "title": "CREATE-SQL-BLOCK-RULE", - "language": "en" -} ---- - - - -## CREATE-SQL-BLOCK-RULE - -### Name - -CREATE SQL BLOCK RULE - -### Description - -This statement creates a SQL blocking rule. it can restrict any kind of sql statements(no matter DDL or DML statement). 
- -Supports configuring SQL blacklists by user: - -- Refuse to specify SQL by regular matching -- Check if a sql reaches one of these limits by setting partition_num, tablet_num, cardinality - - partition_num, tablet_num, cardinality can be set together, once a query reaches one of these limits, the query will be intercepted - -grammar: - -```sql -CREATE SQL_BLOCK_RULE rule_name -[PROPERTIES ("key"="value", ...)]; -```` - -Parameter Description: - -- sql: matching rule (based on regular matching, special characters need to be translated,for example`select *`use`select \\*`), optional, the default value is "NULL" -- sqlHash: sql hash value, used for exact matching, we will print this value in `fe.audit.log`, optional, this parameter and sql can only be selected one, the default value is "NULL" -- partition_num: the maximum number of partitions a scan node will scan, the default value is 0L -- tablet_num: The maximum number of tablets that a scanning node will scan, the default value is 0L -- cardinality: the rough scan line number of a scan node, the default value is 0L -- global: Whether to take effect globally (all users), the default is false -- enable: whether to enable blocking rules, the default is true - -### Example - -1. Create a block rule named test_rule - - ```sql - CREATE SQL_BLOCK_RULE test_rule - PROPERTIES( - "sql"="select \\* from order_analysis", - "global"="false", - "enable"="true" - ); - ```` - - >Notes: - > - >That the sql statement here does not end with a semicolon - - When we execute the sql we just defined in the rule, an exception error will be returned. The example is as follows: - - ```sql - select * from order_analysis; - ERROR 1064 (HY000): errCode = 2, detailMessage = sql match regex sql block rule: order_analysis_rule - ```` - - -2. Create test_rule2, limit the maximum number of scanned partitions to 30, and limit the maximum scan base to 10 billion rows. 
The example is as follows: - - ```sql - CREATE SQL_BLOCK_RULE test_rule2 - PROPERTIES ( - "partition_num" = "30", - "cardinality" = "10000000000", - "global" = "false", - "enable" = "true" - ); - ```` -3. Create SQL BLOCK RULE with special chars - - ```sql - CREATE SQL_BLOCK_RULE test_rule3 - PROPERTIES - ( - "sql" = "select count\\(1\\) from db1.tbl1" - ); - CREATE SQL_BLOCK_RULE test_rule4 - PROPERTIES - ( - "sql" = "select \\* from db1.tbl1" - ); - ``` -4. In SQL_BLOCK_RULE, SQL matching is based on regular expressions. If want to match more patterns of SQL, need to write the corresponding regex. For example, to ignore spaces in SQL and not query tables that start with 'order_', as shown below: - - ```sql - CREATE SQL_BLOCK_RULE test_rule4 - PROPERTIES( - "sql"="\\s*select\\s*\\*\\s*from order_\\w*\\s*", - "global"="false", - "enable"="true" - ); - ``` - -### APPENDIX -Here are some commonly used regular expressions: -> . :Matches any single character except for a newline character \n. -> -> * :Matches the preceding element zero or more times. For example, a matches zero or more 'a'. -> -> + :Matches the preceding element one or more times. For example, a+ matches one or more 'a'. -> -> ? :Matches the preceding element zero or one time. For example, a? matches zero or one 'a'. -> -> [] :Used to define a character set. For example, [aeiou] matches any one vowel letter. -> -> [^] :In a character set, use ^ to indicate negation, matching characters that are not in the set. For example, [^0-9] matches any non-digit character. -> -> () :Used for grouping expressions and applying quantifiers. For example, (ab)+ matches consecutive 'ab'. -> -> | :Represents logical OR. For example, a|b matches 'a' or 'b'. -> -> ^ :Matches the beginning of a string. For example, ^abc matches a string that starts with 'abc'. -> -> $ :Matches the end of a string. For example, xyz$ matches a string that ends with 'xyz'. 
-> -> \ :Used to escape special characters to treat them as ordinary characters. For example, \\. matches the period character '.'. -> -> \s :Matches any whitespace character, including spaces, tabs, newline characters, etc. -> -> \d :Matches any digit character, equivalent to [0-9]. -> -> \w :Matches any word character, including letters, digits, and underscores, equivalent to [a-zA-Z0-9_]. - -### Keywords - -````text -CREATE, SQL_BLCOK_RULE -```` - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE-AS-SELECT.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE-AS-SELECT.md deleted file mode 100644 index fa13ab62999fa9..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE-AS-SELECT.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -{ - "title": "CREATE-TABLE-AS-SELECT", - "language": "en" -} ---- - - - -## CREATE-TABLE-AS-SELECT - -### Name - -CREATE TABLE AS SELECT - -### Description - -This statement creates the table structure by returning the results from the Select statement and imports the data at the same time - -grammar: - -```sql -CREATE TABLE table_name [( column_name_list )] - opt_engine:engineName - opt_keys:keys - opt_comment:tableComment - opt_partition:partition - opt_distribution:distribution - opt_rollup:index - opt_properties:tblProperties - opt_ext_properties:extProperties - KW_AS query_stmt:query_def - ``` - -illustrate: - -- The user needs to have`SELECT`permission for the source table and`CREATE`permission for the target database -- After a table is created, data is imported. If the import fails, the table is deleted -- You can specify the key type. The default key type is `Duplicate Key` - - - -- All columns of type string (varchar/var/string) are created as type "string". 
-- If the created source is an external table and the first column is of type String, the first column is automatically set to VARCHAR(65533). Because of Doris internal table, String column is not allowed as first column. - - - -### Example - -1. Using the field names in the SELECT statement - - ```sql - create table `test`.`select_varchar` - PROPERTIES("replication_num" = "1") - as select * from `test`.`varchar_table` - ``` - -2. Custom field names (need to match the number of fields returned) - ```sql - create table `test`.`select_name`(user, testname, userstatus) - PROPERTIES("replication_num" = "1") - as select vt.userId, vt.username, jt.status - from `test`.`varchar_table` vt join - `test`.`join_table` jt on vt.userId=jt.userId - ``` - -3. Specify table model, partitions, and buckets - ```sql - CREATE TABLE t_user(dt, id, name) - ENGINE=OLAP - UNIQUE KEY(dt, id) - COMMENT "OLAP" - PARTITION BY RANGE(dt) - ( - FROM ("2020-01-01") TO ("2021-12-31") INTERVAL 1 YEAR - ) - DISTRIBUTED BY HASH(id) BUCKETS 1 - PROPERTIES("replication_num"="1") - AS SELECT cast('2020-05-20' as date) as dt, 1 as id, 'Tom' as name; - ``` - -### Keywords - - CREATE, TABLE, AS, SELECT - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE-LIKE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE-LIKE.md deleted file mode 100644 index 1e52f6fd269563..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE-LIKE.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -{ - "title": "CREATE-TABLE-LIKE", - "language": "en" -} ---- - - - -## CREATE-TABLE-LIKE - -### Name - -CREATE TABLE LIKE - -### Description - -This statement is used to create an empty table with the exact same table structure as another table, and can optionally replicate some rollups. 
- -grammar: - -```sql -CREATE [EXTERNAL] TABLE [IF NOT EXISTS] [database.]table_name LIKE [database.]table_name [WITH ROLLUP (r1,r2,r3,...)] -```` - -illustrate: - -- The copied table structure includes Column Definition, Partitions, Table Properties, etc. -- The user needs to have `SELECT` permission on the copied original table -- Support for copying external tables such as MySQL -- Support the rollup of copying OLAP Table - -### Example - -1. Create an empty table with the same table structure as table1 under the test1 library, the table name is table2 - - ```sql - CREATE TABLE test1.table2 LIKE test1.table1 - ```` - -2. Create an empty table with the same table structure as test1.table1 under the test2 library, the table name is table2 - - ```sql - CREATE TABLE test2.table2 LIKE test1.table1 - ```` - -3. Create an empty table with the same table structure as table1 under the test1 library, the table name is table2, and copy the two rollups of r1 and r2 of table1 at the same time - - ```sql - CREATE TABLE test1.table2 LIKE test1.table1 WITH ROLLUP (r1,r2) - ```` - -4. Create an empty table with the same table structure as table1 under the test1 library, the table name is table2, and copy all the rollups of table1 at the same time - - ```sql - CREATE TABLE test1.table2 LIKE test1.table1 WITH ROLLUP - ```` - -5. Create an empty table with the same table structure as test1.table1 under the test2 library, the table name is table2, and copy the two rollups of r1 and r2 of table1 at the same time - - ```sql - CREATE TABLE test2.table2 LIKE test1.table1 WITH ROLLUP (r1,r2) - ```` - -6. Create an empty table with the same table structure as test1.table1 under the test2 library, the table name is table2, and copy all the rollups of table1 at the same time - - ```sql - CREATE TABLE test2.table2 LIKE test1.table1 WITH ROLLUP - ```` - -7. 
Create an empty table under the test1 library with the same table structure as the MySQL outer table1, the table name is table2 - - ```sql - CREATE TABLE test1.table2 LIKE test1.table1 - ```` - -### Keywords - - CREATE, TABLE, LIKE - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md deleted file mode 100644 index 0bf9590ebf1179..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-TABLE.md +++ /dev/null @@ -1,856 +0,0 @@ ---- -{ - "title": "CREATE-TABLE", - "language": "en", - "toc_min_heading_level": 2, - "toc_max_heading_level": 4 -} ---- - - - -## CREATE-TABLE - -### Description - -This command is used to create a table. The subject of this document describes the syntax for creating Doris self-maintained tables. For external table syntax, please refer to the [CREATE-EXTERNAL-TABLE](./CREATE-EXTERNAL-TABLE.md) document. 
- -```sql -CREATE TABLE [IF NOT EXISTS] [database.]table -( - column_definition_list - [, index_definition_list] -) -[engine_type] -[keys_type] -[table_comment] -[partition_info] -distribution_desc -[rollup_list] -[properties] -[extra_properties] -``` - -#### column_definition_list - -Column definition list: - -`column_definition[, column_definition]` - -* `column_definition` - - Column definition: - - `column_name column_type [KEY] [aggr_type] [NULL] [AUTO_INCREMENT(auto_inc_start_value)] [default_value] [on update current_timestamp] [column_comment]` - - * `column_type` - - Column type, the following types are supported: - - ``` - TINYINT (1 byte) - Range: -2^7 + 1 ~ 2^7-1 - SMALLINT (2 bytes) - Range: -2^15 + 1 ~ 2^15-1 - INT (4 bytes) - Range: -2^31 + 1 ~ 2^31-1 - BIGINT (8 bytes) - Range: -2^63 + 1 ~ 2^63-1 - LARGEINT (16 bytes) - Range: -2^127 + 1 ~ 2^127-1 - FLOAT (4 bytes) - Support scientific notation - DOUBLE (12 bytes) - Support scientific notation - DECIMAL[(precision, scale)] (16 bytes) - The decimal type with guaranteed precision. The default is DECIMAL(10, 0) - precision: 1 ~ 27 - scale: 0 ~ 9 - Where the integer part is 1 ~ 18 - Does not support scientific notation - DATE (3 bytes) - Range: 0000-01-01 ~ 9999-12-31 - DATETIME (8 bytes) - Range: 0000-01-01 00:00:00 ~ 9999-12-31 23:59:59 - CHAR[(length)] - Fixed-length character string. Length range: 1 ~ 255. Default is 1 - VARCHAR[(length)] - Variable length character string. Length range: 1 ~ 65533. Default is 65533 - HLL (1~16385 bytes) - HyperLogLog column type, do not need to specify the length and default value. The length is controlled within the system according to the degree of data aggregation. - Must be used with HLL_UNION aggregation type. - BITMAP - The bitmap column type does not need to specify the length and default value. Represents a collection of integers, and the maximum number of elements supported is 2^64-1. - Must be used with BITMAP_UNION aggregation type. 
- ``` - - * `aggr_type` - - Aggregation type, the following aggregation types are supported: - - ``` - SUM: Sum. Applicable numeric types. - MIN: Find the minimum value. Suitable for numeric types. - MAX: Find the maximum value. Suitable for numeric types. - REPLACE: Replace. For rows with the same dimension column, the index column will be imported in the order of import, and the last imported will replace the first imported. - REPLACE_IF_NOT_NULL: non-null value replacement. The difference with REPLACE is that there is no replacement for null values. It should be noted here that the default value should be NULL, not an empty string. If it is an empty string, you should replace it with an empty string. - HLL_UNION: The aggregation method of HLL type columns, aggregated by HyperLogLog algorithm. - BITMAP_UNION: The aggregation mode of BIMTAP type columns, which performs the union aggregation of bitmaps. - ``` - - * `AUTO_INCREMENT(auto_inc_start_value)` - - To indicate if the column is a auto-increment column. Auto-increment column can be used to generate a unique identity for new row. If no values are assgined for auto-increment column when inserting, Doris will generate sequence numbers automatically. You can also assign the auto-increment column with NULL literal to indicate Doris to generate sequence numbers. It should be noted that, for performance reasons, BE will cache some values of auto-increment column in memory. Therefore, the values generated by auto-increment column can only guarantee monotonicity and uniqueness, but not strict continuity. - A table can have at most one auto-incremnt column. The auto-increment column should be BIGINT type and be NOT NULL. - Both Duplicate model table and Unique model table support auto-increment column. - You can specify the starting value for an auto-increment column by providing `auto_inc_start_value. If not specified, the default starting value is 1. - - * `default_value` - - Default value of the column. 
If the load data does not specify a value for this column, the system will assign a default value to this column. - - The syntax is: `default default_value`. - - Currently, the default value supports two forms: - - 1. The user specifies a fixed value, such as: - - ```SQL - k1 INT DEFAULT '1', - k2 CHAR(10) DEFAULT 'aaaa' - ``` - 2. Keywords are provided by the system. Currently, the following keywords are supported: - - ```SQL - // This keyword is used only for DATETIME type. If the value is missing, the system assigns the current timestamp. - dt DATETIME DEFAULT CURRENT_TIMESTAMP - ``` - - * `on update current_timestamp` - - To indicate that whether the value of this column should be updated to the current timestamp (`current_timestamp`) when there is an update on the row. The feature is only available on unique table with merge-on-write enabled. Columns with this feature enabled must declare a default value, and the default value must be `current_timestamp`. If the precision of the timestamp is declared here, the timestamp precision in the default value of the column must be the same as the precision declared here." - - Example: - - ``` - k1 TINYINT, - k2 DECIMAL(10,2) DEFAULT "10.5", - k4 BIGINT NULL DEFAULT "1000" COMMENT "This is column k4", - v1 VARCHAR(10) REPLACE NOT NULL, - v2 BITMAP BITMAP_UNION, - v3 HLL HLL_UNION, - v4 INT SUM NOT NULL DEFAULT "1" COMMENT "This is column v4" - dt datetime(6) default current_timestamp(6) on update current_timestamp(6) - ``` - -#### index_definition_list - -Index list definition: - -`index_definition[, index_definition]` - -* `index_definition` - - Index definition: - - ```sql - INDEX index_name (col_name) [USING INVERTED] COMMENT'xxxxxx' - ``` - - Example: - - ```sql - INDEX idx1 (k1) USING INVERTED COMMENT "This is a inverted index1", - INDEX idx2 (k2) USING INVERTED COMMENT "This is a inverted index2", - ... - ``` - -#### engine_type - -Table engine type. All types in this document are OLAP. 
For other external table engine types, see [CREATE EXTERNAL TABLE](./CREATE-EXTERNAL-TABLE.md) document. Example: - - `ENGINE=olap` - -#### keys_type - -Data model. - -`key_type(col1, col2, ...)` - -`key_type` supports the following models: - -* DUPLICATE KEY (default): The subsequent specified column is the sorting column. -* AGGREGATE KEY: The specified column is the dimension column. -* UNIQUE KEY: The subsequent specified column is the primary key column. - - -NOTE: when set table property `"enable_duplicate_without_keys_by_default" = "true"`, will create a duplicate model without sorting columns and prefix indexes by default. - - -Example: - -``` -DUPLICATE KEY(col1, col2), -AGGREGATE KEY(k1, k2, k3), -UNIQUE KEY(k1, k2) -``` - -#### table_comment - -Table notes. Example: - - ``` - COMMENT "This is my first DORIS table" - ``` - -#### partition_info - -Partition information supports three writing methods: - -1. LESS THAN: Only define the upper boundary of the partition. The lower bound is determined by the upper bound of the previous partition. - - ``` - PARTITION BY RANGE(col1[, col2, ...]) - ( - PARTITION partition_name1 VALUES LESS THAN MAXVALUE|("value1", "value2", ...), - PARTITION partition_name2 VALUES LESS THAN MAXVALUE|("value1", "value2", ...) - ) - ``` - -2. FIXED RANGE: Define the left closed and right open interval of the zone. - - ``` - PARTITION BY RANGE(col1[, col2, ...]) - ( - PARTITION partition_name1 VALUES [("k1-lower1", "k2-lower1", "k3-lower1",...), ("k1-upper1", "k2-upper1", "k3-upper1", ... )), - PARTITION partition_name2 VALUES [("k1-lower1-2", "k2-lower1-2", ...), ("k1-upper1-2", MAXVALUE, )) - ) - ``` - - - -3. MULTI RANGE:Multi build RANGE partitions,Define the left closed and right open interval of the zone, Set the time unit and step size, the time unit supports year, month, day, week and hour. 
- - ``` - PARTITION BY RANGE(col) - ( - FROM ("2000-11-14") TO ("2021-11-14") INTERVAL 1 YEAR, - FROM ("2021-11-14") TO ("2022-11-14") INTERVAL 1 MONTH, - FROM ("2022-11-14") TO ("2023-01-03") INTERVAL 1 WEEK, - FROM ("2023-01-03") TO ("2023-01-14") INTERVAL 1 DAY - ) - ``` - - - - -4. MULTI RANGE:Multi build integer RANGE partitions,Define the left closed and right open interval of the zone, and step size. - - ``` - PARTITION BY RANGE(int_col) - ( - FROM (1) TO (100) INTERVAL 10 - ) - ``` - -#### distribution_desc - -Define the data bucketing method. - -1. Hash - Syntax: - `DISTRIBUTED BY HASH (k1[,k2 ...]) [BUCKETS num|auto]` - Explain: - Hash bucketing using the specified key column. - -2. Random - Syntax: - `DISTRIBUTED BY RANDOM [BUCKETS num|auto]` - Explain: - Use random numbers for bucketing. - -#### rollup_list - -Multiple materialized views (ROLLUP) can be created at the same time as the table is built. - -`ROLLUP (rollup_definition[, rollup_definition, ...])` - -* `rollup_definition` - - `rollup_name (col1[, col2, ...]) [DUPLICATE KEY(col1[, col2, ...])] [PROPERTIES("key" = "value")]` - - Example: - - ``` - ROLLUP ( - r1 (k1, k3, v1, v2), - r2 (k1, v1) - ) - ``` - -#### properties - -Set table properties. The following attributes are currently supported: - -* `replication_num` - - Number of copies. The default number of copies is 3. If the number of BE nodes is less than 3, you need to specify that the number of copies is less than or equal to the number of BE nodes. - - After version 0.15, this attribute will be automatically converted to the `replication_allocation` attribute, such as: - - `"replication_num" = "3"` will be automatically converted to `"replication_allocation" = "tag.location.default:3"` - -* `replication_allocation` - - Set the copy distribution according to Tag. This attribute can completely cover the function of the `replication_num` attribute. 
- -* `min_load_replica_num` - - The minimum required successful replica num for loading data. The default value is `-1`. If set less than or equal to 0, loading data requires a majority replicas to succeed. - -* `is_being_synced` - - Used to identify whether this table is copied by CCR and is being synchronized by syncer. The default is `false`. - - If set to `true`: - `colocate_with`, `storage_policy` properties will be erased - `dynamic partition`, `auto bucket` features will be disabled, that is, they will be displayed as enabled in `show create table`, but will not actually take effect. When `is_being_synced` is set to `false`, these features will resume working. - - This property is for CCR peripheral modules only and should not be manually set during CCR synchronization. - -* `storage_medium/storage_cooldown_time` - - Data storage medium. `storage_medium` is used to declare the initial storage medium of the table data, and `storage_cooldown_time` is used to set the expiration time. Example: - - ``` - "storage_medium" = "SSD", - "storage_cooldown_time" = "2020-11-20 00:00:00" - ``` - - This example indicates that the data is stored in the SSD and will be automatically migrated to the HDD storage after the expiration of 2020-11-20 00:00:00. - -* `colocate_with` - - When you need to use the Colocation Join function, use this parameter to set the Colocation Group. - - `"colocate_with" = "group1"` - -* `bloom_filter_columns` - - The user specifies the list of column names that need to be added to the Bloom Filter index. The Bloom Filter index of each column is independent, not a composite index. - - `"bloom_filter_columns" = "k1, k2, k3"` - -* `in_memory` - - Deprecated. - -* `function_column.sequence_col` - - When using the UNIQUE KEY model, you can specify a sequence column. 
When the KEY columns are the same, REPLACE will be performed according to the sequence column (the larger value replaces the smaller value, otherwise it cannot be replaced) - - The `function_column.sequence_col` is used to specify the mapping of the sequence column to a column in the table, which can be integral and time (DATE, DATETIME). The type of this column cannot be changed after creation. If `function_column.sequence_col` is set, `function_column.sequence_type` is ignored. - - `"function_column.sequence_col" ='column_name'` - -* `function_column.sequence_type` - - When using the UNIQUE KEY model, you can specify a sequence column. When the KEY columns are the same, REPLACE will be performed according to the sequence column (the larger value replaces the smaller value, otherwise it cannot be replaced) - - Here we only need to specify the type of sequence column, support time type or integer type. Doris will create a hidden sequence column. - - `"function_column.sequence_type" ='Date'` - -* `compression` - - The default compression method for Doris tables is LZ4. After version 1.1, it is supported to specify the compression method as ZSTD to obtain a higher compression ratio. - - `"compression"="zstd"` - -* `enable_unique_key_merge_on_write` - - Wheather the unique table use merge-on-write implementation. - - The property is disabled by default before version 2.1 and is enabled by default since version 2.1. - -* `light_schema_change` - - Whether to use the Light Schema Change optimization. - - If set to true, the addition and deletion of value columns can be done more quickly and synchronously. - - `"light_schema_change"="true"` - - This feature is enabled by default after v2.0.0. - -* `disable_auto_compaction` - - Whether to disable automatic compaction for this table. - - If this property is set to 'true', the background automatic compaction process will skip all the tables of this table. 
- - `"disable_auto_compaction" = "false"` - -* `enable_single_replica_compaction` - - Whether to enable single replica compaction for this table. - - If this property is set to 'true', all replicas of the tablet will only have one replica performing compaction, while the others fetch rowsets from that replica. - - `"enable_single_replica_compaction" = "false"` - -* `enable_duplicate_without_keys_by_default` - - When `true`, if Unique, Aggregate, or Duplicate is not specified when creating a table, a Duplicate model table without sorting columns and prefix indexes will be created by default. - - `"enable_duplicate_without_keys_by_default" = "false"` - -* `skip_write_index_on_load` - - Whether to enable skip inverted index on load for this table. - - If this property is set to 'true', skip writting index (only inverted index now) on first time load and delay writting - index to compaction. It can reduce CPU and IO resource usage for high throughput load. - - `"skip_write_index_on_load" = "false"` - -* `compaction_policy` - - Configure the compaction strategy in the compression phase. Only support configuring the compaction policy as "time_series" or "size_based". - - time_series: When the disk size of a rowset accumulates to a certain threshold, version merging takes place. The merged rowset is directly promoted to the base compaction stage. This approach effectively reduces the write amplification rate of compaction, especially in scenarios with continuous imports in a time series context. - - In the case of time series compaction, the execution of compaction is adjusted using parameters that have the prefix time_series_compaction. - - `"compaction_policy" = ""` - -* `time_series_compaction_goal_size_mbytes` - - Time series compaction policy will utilize this parameter to adjust the size of input files for each compaction. The output file size will be approximately equal to the input file size. 
- - `"time_series_compaction_goal_size_mbytes" = "1024"` - -* `time_series_compaction_file_count_threshold` - - Time series compaction policy will utilize this parameter to adjust the minimum number of input files for each compaction. - - If the number of files in a tablet exceeds the configured threshold, it will trigger a compaction process. - - `"time_series_compaction_file_count_threshold" = "2000"` - -* `time_series_compaction_time_threshold_seconds` - - When time series compaction policy is applied, a significant duration passes without a compaction being executed, a compaction will be triggered. - - `"time_series_compaction_time_threshold_seconds" = "3600"` - -* `time_series_compaction_level_threshold` - - When time series compaction policy is applied, This parameter defaults to 1. When set to 2, it is used to control the re-merging of segments that have been - - merged once, ensuring that the segment size reaches the time_series_compaction_goal_size_mbytes, which can achieve the effect of reducing the number of - - segments. - - `"time_series_compaction_level_threshold" = "2"` - -* Dynamic partition related - - The relevant parameters of dynamic partition are as follows: - -* `dynamic_partition.enable`: Used to specify whether the dynamic partition function at the table level is enabled. The default is true. -* `dynamic_partition.time_unit:` is used to specify the time unit for dynamically adding partitions, which can be selected as DAY (day), WEEK (week), MONTH (month), YEAR (year), HOUR (hour). -* `dynamic_partition.start`: Used to specify how many partitions to delete forward. The value must be less than 0. The default is Integer.MIN_VALUE. -* `dynamic_partition.end`: Used to specify the number of partitions created in advance. The value must be greater than 0. -* `dynamic_partition.prefix`: Used to specify the partition name prefix to be created. 
For example, if the partition name prefix is ​​p, the partition name will be automatically created as p20200108. -* `dynamic_partition.buckets`: Used to specify the number of partition buckets that are automatically created. -* `dynamic_partition.create_history_partition`: Whether to create a history partition. -* `dynamic_partition.history_partition_num`: Specify the number of historical partitions to be created. -* `dynamic_partition.reserved_history_periods`: Used to specify the range of reserved history periods. - -### Example - -1. Create a detailed model table - - ```sql - CREATE TABLE example_db.table_hash - ( - k1 TINYINT, - k2 DECIMAL(10, 2) DEFAULT "10.5", - k3 CHAR(10) COMMENT "string column", - k4 INT NOT NULL DEFAULT "1" COMMENT "int column" - ) - COMMENT "my first table" - DISTRIBUTED BY HASH(k1) BUCKETS 32 - ``` - -2. Create a detailed model table, partition, specify the sorting column, and set the number of copies to 1 - - ```sql - CREATE TABLE example_db.table_hash - ( - k1 DATE, - k2 DECIMAL(10, 2) DEFAULT "10.5", - k3 CHAR(10) COMMENT "string column", - k4 INT NOT NULL DEFAULT "1" COMMENT "int column" - ) - DUPLICATE KEY(k1, k2) - COMMENT "my first table" - PARTITION BY RANGE(k1) - ( - PARTITION p1 VALUES LESS THAN ("2020-02-01"), - PARTITION p2 VALUES LESS THAN ("2020-03-01"), - PARTITION p3 VALUES LESS THAN ("2020-04-01") - ) - DISTRIBUTED BY HASH(k1) BUCKETS 32 - PROPERTIES ( - "replication_num" = "1" - ); - ``` - -3. Create a table with a unique model of the primary key, set the initial storage medium and cooling time - - ```sql - CREATE TABLE example_db.table_hash - ( - k1 BIGINT, - k2 LARGEINT, - v1 VARCHAR(2048), - v2 SMALLINT DEFAULT "10" - ) - UNIQUE KEY(k1, k2) - DISTRIBUTED BY HASH (k1, k2) BUCKETS 32 - PROPERTIES( - "storage_medium" = "SSD", - "storage_cooldown_time" = "2015-06-04 00:00:00" - ); - ``` - -4. 
Create an aggregate model table, using a fixed range partition description - - ```sql - CREATE TABLE table_range - ( - k1 DATE, - k2 INT, - k3 SMALLINT, - v1 VARCHAR(2048) REPLACE, - v2 INT SUM DEFAULT "1" - ) - AGGREGATE KEY(k1, k2, k3) - PARTITION BY RANGE (k1, k2, k3) - ( - PARTITION p1 VALUES [("2014-01-01", "10", "200"), ("2014-01-01", "20", "300")), - PARTITION p2 VALUES [("2014-06-01", "100", "200"), ("2014-07-01", "100", "300")) - ) - DISTRIBUTED BY HASH(k2) BUCKETS 32 - ``` - -5. Create an aggregate model table with HLL and BITMAP column types - - ```sql - CREATE TABLE example_db.example_table - ( - k1 TINYINT, - k2 DECIMAL(10, 2) DEFAULT "10.5", - v1 HLL HLL_UNION, - v2 BITMAP BITMAP_UNION - ) - ENGINE=olap - AGGREGATE KEY(k1, k2) - DISTRIBUTED BY HASH(k1) BUCKETS 32 - ``` - -6. Create two self-maintained tables of the same Colocation Group. - - ```sql - CREATE TABLE t1 ( - id int(11) COMMENT "", - value varchar(8) COMMENT "" - ) - DUPLICATE KEY(id) - DISTRIBUTED BY HASH(id) BUCKETS 10 - PROPERTIES ( - "colocate_with" = "group1" - ); - - CREATE TABLE t2 ( - id int(11) COMMENT "", - value1 varchar(8) COMMENT "", - value2 varchar(8) COMMENT "" - ) - DUPLICATE KEY(`id`) - DISTRIBUTED BY HASH(`id`) BUCKETS 10 - PROPERTIES ( - "colocate_with" = "group1" - ); - ``` - -7. Create a table with inverted index and bloom filter index - - ```sql - CREATE TABLE example_db.table_hash - ( - k1 TINYINT, - k2 DECIMAL(10, 2) DEFAULT "10.5", - v1 CHAR(10) REPLACE, - v2 INT SUM, - INDEX k1_idx (k1) USING INVERTED COMMENT'my first index' - ) - AGGREGATE KEY(k1, k2) - DISTRIBUTED BY HASH(k1) BUCKETS 32 - PROPERTIES ( - "bloom_filter_columns" = "k2" - ); - ``` - -8. Create a dynamic partition table. - - The table creates partitions 3 days in advance every day, and deletes the partitions 3 days ago. For example, if today is `2020-01-08`, partitions named `p20200108`, `p20200109`, `p20200110`, `p20200111` will be created. 
The partition ranges are: - - ``` - [types: [DATE]; keys: [2020-01-08]; ‥types: [DATE]; keys: [2020-01-09];) - [types: [DATE]; keys: [2020-01-09]; ‥types: [DATE]; keys: [2020-01-10];) - [types: [DATE]; keys: [2020-01-10]; ‥types: [DATE]; keys: [2020-01-11];) - [types: [DATE]; keys: [2020-01-11]; ‥types: [DATE]; keys: [2020-01-12];) - ``` - - ```sql - CREATE TABLE example_db.dynamic_partition - ( - k1 DATE, - k2 INT, - k3 SMALLINT, - v1 VARCHAR(2048), - v2 DATETIME DEFAULT "2014-02-04 15:36:00" - ) - DUPLICATE KEY(k1, k2, k3) - PARTITION BY RANGE (k1) () - DISTRIBUTED BY HASH(k2) BUCKETS 32 - PROPERTIES( - "dynamic_partition.time_unit" = "DAY", - "dynamic_partition.start" = "-3", - "dynamic_partition.end" = "3", - "dynamic_partition.prefix" = "p", - "dynamic_partition.buckets" = "32" - ); - ``` - -9. Create a table with a materialized view (ROLLUP). - - ```sql - CREATE TABLE example_db.rolup_index_table - ( - event_day DATE, - siteid INT DEFAULT '10', - citycode SMALLINT, - username VARCHAR(32) DEFAULT'', - pv BIGINT SUM DEFAULT '0' - ) - AGGREGATE KEY(event_day, siteid, citycode, username) - DISTRIBUTED BY HASH(siteid) BUCKETS 10 - ROLLUP ( - r1(event_day,siteid), - r2(event_day,citycode), - r3(event_day) - ) - PROPERTIES("replication_num" = "3"); - ``` - -10. Set the replica of the table through the `replication_allocation` property. 
- - ```sql - CREATE TABLE example_db.table_hash - ( - k1 TINYINT, - k2 DECIMAL(10, 2) DEFAULT "10.5" - ) - DISTRIBUTED BY HASH(k1) BUCKETS 32 - PROPERTIES ( - "replication_allocation"="tag.location.group_a:1, tag.location.group_b:2" - ); - ``` - ```sql - CREATE TABLE example_db.dynamic_partition - ( - k1 DATE, - k2 INT, - k3 SMALLINT, - v1 VARCHAR(2048), - v2 DATETIME DEFAULT "2014-02-04 15:36:00" - ) - PARTITION BY RANGE (k1) () - DISTRIBUTED BY HASH(k2) BUCKETS 32 - PROPERTIES( - "dynamic_partition.time_unit" = "DAY", - "dynamic_partition.start" = "-3", - "dynamic_partition.end" = "3", - "dynamic_partition.prefix" = "p", - "dynamic_partition.buckets" = "32", - "dynamic_partition.replication_allocation" = "tag.location.group_a:3" - ); - ``` - -11. Set the table hot and cold separation policy through the `storage_policy` property. - ```sql - CREATE TABLE IF NOT EXISTS create_table_use_created_policy - ( - k1 BIGINT, - k2 LARGEINT, - v1 VARCHAR(2048) - ) - UNIQUE KEY(k1) - DISTRIBUTED BY HASH (k1) BUCKETS 3 - PROPERTIES( - "storage_policy" = "test_create_table_use_policy", - "replication_num" = "1" - ); - ``` -NOTE: Need to create the s3 resource and storage policy before the table can be successfully associated with the migration policy - -12. Add a hot and cold data migration strategy for the table partition - ```sql - CREATE TABLE create_table_partion_use_created_policy - ( - k1 DATE, - k2 INT, - V1 VARCHAR(2048) REPLACE - ) PARTITION BY RANGE (k1) ( - PARTITION p1 VALUES LESS THAN ("2022-01-01") ("storage_policy" = "test_create_table_partition_use_policy_1" ,"replication_num"="1"), - PARTITION p2 VALUES LESS THAN ("2022-02-01") ("storage_policy" = "test_create_table_partition_use_policy_2" ,"replication_num"="1") - ) DISTRIBUTED BY HASH(k2) BUCKETS 1; - ``` -NOTE: Need to create the s3 resource and storage policy before the table can be successfully associated with the migration policy - - - -13. 
Multi Partition by a partition desc - ```sql - CREATE TABLE create_table_multi_partion_date - ( - k1 DATE, - k2 INT, - V1 VARCHAR(20) - ) PARTITION BY RANGE (k1) ( - FROM ("2000-11-14") TO ("2021-11-14") INTERVAL 1 YEAR, - FROM ("2021-11-14") TO ("2022-11-14") INTERVAL 1 MONTH, - FROM ("2022-11-14") TO ("2023-01-03") INTERVAL 1 WEEK, - FROM ("2023-01-03") TO ("2023-01-14") INTERVAL 1 DAY, - PARTITION p_20230114 VALUES [('2023-01-14'), ('2023-01-15')) - ) DISTRIBUTED BY HASH(k2) BUCKETS 1 - PROPERTIES( - "replication_num" = "1" - ); - ``` - ```sql - CREATE TABLE create_table_multi_partion_date_hour - ( - k1 DATETIME, - k2 INT, - V1 VARCHAR(20) - ) PARTITION BY RANGE (k1) ( - FROM ("2023-01-03 12") TO ("2023-01-14 22") INTERVAL 1 HOUR - ) DISTRIBUTED BY HASH(k2) BUCKETS 1 - PROPERTIES( - "replication_num" = "1" - ); - ``` - ```sql - CREATE TABLE create_table_multi_partion_integer - ( - k1 BIGINT, - k2 INT, - V1 VARCHAR(20) - ) PARTITION BY RANGE (k1) ( - FROM (1) TO (100) INTERVAL 10 - ) DISTRIBUTED BY HASH(k2) BUCKETS 1 - PROPERTIES( - "replication_num" = "1" - ); - ``` - -NOTE: Multi Partition can be mixed with conventional manual creation of partitions. When using, you need to limit the partition column to only one, The default maximum number of partitions created in multi partition is 4096, This parameter can be adjusted in fe configuration `max_multi_partition_num`. - - - - - -14. 
Add a duplicate without sorting column table - - ```sql - CREATE TABLE example_db.table_hash - ( - k1 DATE, - k2 DECIMAL(10, 2) DEFAULT "10.5", - k3 CHAR(10) COMMENT "string column", - k4 INT NOT NULL DEFAULT "1" COMMENT "int column" - ) - COMMENT "duplicate without keys" - PARTITION BY RANGE(k1) - ( - PARTITION p1 VALUES LESS THAN ("2020-02-01"), - PARTITION p2 VALUES LESS THAN ("2020-03-01"), - PARTITION p3 VALUES LESS THAN ("2020-04-01") - ) - DISTRIBUTED BY HASH(k1) BUCKETS 32 - PROPERTIES ( - "replication_num" = "1", - "enable_duplicate_without_keys_by_default" = "true" - ); - ``` - - - -### Keywords - - CREATE, TABLE - -### Best Practice - -#### Partitioning and bucketing - -A table must specify the bucket column, but it does not need to specify the partition. For the specific introduction of partitioning and bucketing, please refer to the [Data Division](../../../../data-table/data-partition.md) document. - -Tables in Doris can be divided into partitioned tables and non-partitioned tables. This attribute is determined when the table is created and cannot be changed afterwards. That is, for partitioned tables, you can add or delete partitions in the subsequent use process, and for non-partitioned tables, you can no longer perform operations such as adding partitions afterwards. - -At the same time, partitioning columns and bucketing columns cannot be changed after the table is created. You can neither change the types of partitioning and bucketing columns, nor do any additions or deletions to these columns. - -Therefore, it is recommended to confirm the usage method to build the table reasonably before building the table. - -#### Dynamic Partition - -The dynamic partition function is mainly used to help users automatically manage partitions. By setting certain rules, the Doris system regularly adds new partitions or deletes historical partitions. Please refer to [Dynamic Partition](../../../../advanced/partition/dynamic-partition.md) document for more help. 
- -#### Materialized View - -Users can create multiple materialized views (ROLLUP) while building a table. Materialized views can also be added after the table is built. It is convenient for users to create all materialized views at one time by writing in the table creation statement. - -If the materialized view is created when the table is created, all subsequent data import operations will synchronize the data of the materialized view to be generated. The number of materialized views may affect the efficiency of data import. - -If you add a materialized view in the subsequent use process, if there is data in the table, the creation time of the materialized view depends on the current amount of data. - -For the introduction of materialized views, please refer to the document [materialized views](../../../../query-acceleration/materialized-view.md). - -#### Index - -Users can create indexes on multiple columns while building a table. Indexes can also be added after the table is built. - -If you add an index in the subsequent use process, if there is data in the table, you need to rewrite all the data, so the creation time of the index depends on the current data volume. - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-VIEW.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-VIEW.md deleted file mode 100644 index f5edf081c3579f..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-VIEW.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -{ - "title": "CREATE-VIEW", - "language": "en" -} ---- - - - -## CREATE-VIEW - -### Name - -CREATE VIEW - -### Description - -This statement is used to create a logical view -grammar: - -```sql -CREATE VIEW [IF NOT EXISTS] - [db_name.]view_name - (column1[ COMMENT "col comment"][, column2, ...]) -AS query_stmt -```` - - -illustrate: - -- Views are logical views and have no physical storage. 
All queries on the view are equivalent to the sub-queries corresponding to the view. -- query_stmt is any supported SQL - -### Example - -1. Create the view example_view on example_db - - ```sql - CREATE VIEW example_db.example_view (k1, k2, k3, v1) - AS - SELECT c1 as k1, k2, k3, SUM(v1) FROM example_table - WHERE k1 = 20160112 GROUP BY k1,k2,k3; - ```` - -2. Create a view with a comment - - ```sql - CREATE VIEW example_db.example_view - ( - k1 COMMENT "first key", - k2 COMMENT "second key", - k3 COMMENT "third key", - v1 COMMENT "first value" - ) - COMMENT "my first view" - AS - SELECT c1 as k1, k2, k3, SUM(v1) FROM example_table - WHERE k1 = 20160112 GROUP BY k1,k2,k3; - ```` - -### Keywords - - CREATE, VIEW - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-WORKLOAD-GROUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-WORKLOAD-GROUP.md deleted file mode 100644 index a9712040b1415c..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Create/CREATE-WORKLOAD-GROUP.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -{ - "title": "CREATE-WORKLOAD-GORUP", - "language": "en" -} ---- - - - -## CREATE-WORKLOAD-GROUP - -### Name - -CREATE WORKLOAD GROUP - - - -### Description - -This statement is used to create a workload group. Workload groups enable the isolation of cpu resources and memory resources on a single be. - -grammar: - -```sql -CREATE WORKLOAD GROUP [IF NOT EXISTS] "rg_name" -PROPERTIES ( - property_list -); -``` - -illustrate: - -Properties supported by property_list: - -* cpu_share: Required, used to set how much cpu time the workload group can acquire, which can achieve soft isolation of cpu resources. cpu_share is a relative value indicating the weight of cpu resources available to the running workload group. 
For example, if a user creates 3 workload groups rg-a, rg-b and rg-c with cpu_share of 10, 30 and 40 respectively, and at a certain moment rg-a and rg-b are running tasks while rg-c has no tasks, then rg-a can get (10 / (10 + 30)) = 25% of the cpu resources while workload group rg-b can get 75% of the cpu resources. If the system has only one workload group running, it gets all the cpu resources regardless of the value of its cpu_share. - -* memory_limit: Required, set the percentage of be memory that can be used by the workload group. The absolute value of the workload group memory limit is: `physical_memory * mem_limit * memory_limit`, where mem_limit is a be configuration item. The total memory_limit of all workload groups in the system must not exceed 100%. Workload groups are guaranteed to use the memory_limit for the tasks in the group in most cases. When the workload group memory usage exceeds this limit, tasks in the group with larger memory usage may be canceled to release the excess memory, refer to enable_memory_overcommit. - -* enable_memory_overcommit: Optional, enable soft memory isolation for the workload group, default is false. if set to false, the workload group is hard memory isolated and the tasks with the largest memory usage will be canceled immediately after the workload group memory usage exceeds the limit to release the excess memory. if set to true, the workload group is hard memory isolated and the tasks with the largest memory usage will be canceled immediately after the workload group memory usage exceeds the limit to release the excess memory. if set to true, the workload group is softly isolated, if the system has free memory resources, the workload group can continue to use system memory after exceeding the memory_limit limit, and when the total system memory is tight, it will cancel several tasks in the group with the largest memory occupation, releasing part of the excess memory to relieve the system memory pressure. 
It is recommended that when this configuration is enabled for a workload group, the total memory_limit of all workload groups should be less than 100%, and the remaining portion should be used for workload group memory overcommit. - -### Example - -1. Create a workload group named g1: - - ```sql - create workload group if not exists g1 - properties ( - "cpu_share"="10", - "memory_limit"="30%", - "enable_memory_overcommit"="true" - ); - ``` - -### Keywords - - CREATE, WORKLOAD, GROUP - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-ASYNC-MATERIALIZED-VIEW.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-ASYNC-MATERIALIZED-VIEW.md deleted file mode 100644 index a8bf6abafd1d1a..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-ASYNC-MATERIALIZED-VIEW.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -{ - "title": "DROP-ASYNC-MATERIALIZED-VIEW", - "language": "en" -} ---- - - - -## DROP-ASYNC-MATERIALIZED-VIEW - -### Name - -DROP ASYNC MATERIALIZED VIEW - -### Description - -This statement is used to delete asynchronous materialized views. - -syntax: - -```sql -DROP MATERIALIZED VIEW (IF EXISTS)? mvName=multipartIdentifier -``` - - -1. IF EXISTS: - If the materialized view does not exist, do not throw an error. If this keyword is not declared and the materialized view does not exist, an error will be reported. - -2. mv_name: - The name of the materialized view to be deleted. Required field. - -### Example - -1. 
Delete table materialized view mv1 - -```sql -DROP MATERIALIZED VIEW mv1; -``` -2.If present, delete the materialized view of the specified database - -```sql -DROP MATERIALIZED VIEW IF EXISTS db1.mv1; -``` - -### Keywords - - DROP, ASYNC, MATERIALIZED, VIEW - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-CATALOG.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-CATALOG.md deleted file mode 100644 index 8ce48c1fe28462..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-CATALOG.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "DROP-CATALOG", - "language": "en" -} ---- - - - -## DROP-CATALOG - -### Name - -CREATE CATALOG - -### Description - -This statement is used to delete the external catalog. - -Syntax: - -```sql -DROP CATALOG [I EXISTS] catalog_name; -``` - -### Example - -1. Drop catalog hive - - ```sql - DROP CATALOG hive; - ``` - -### Keywords - -DROP, CATALOG - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-DATABASE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-DATABASE.md deleted file mode 100644 index 86deed832c6432..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-DATABASE.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -{ - "title": "DROP-DATABASE", - "language": "en" -} ---- - - - -## DROP-DATABASE - -### Name - -DOPR DATABASE - -### Description - -This statement is used to delete the database (database) -grammar: - -```sql -DROP DATABASE [IF EXISTS] db_name [FORCE]; -```` - -illustrate: - -- During the execution of DROP DATABASE, the deleted database can be recovered through the RECOVER statement. 
See the [RECOVER](../../Database-Administration-Statements/RECOVER.md) statement for details -- If you execute DROP DATABASE FORCE, the system will not check the database for unfinished transactions, the database will be deleted directly and cannot be recovered, this operation is generally not recommended - -### Example - -1. Delete the database db_test - - ```sql - DROP DATABASE db_test; - ```` - -### Keywords - - DROP, DATABASE - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-ENCRYPT-KEY.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-ENCRYPT-KEY.md deleted file mode 100644 index ecabb7d26464df..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-ENCRYPT-KEY.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -{ - "title": "DROP-ENCRYPT-KEY", - "language": "en" -} ---- - - - -## DROP-ENCRYPT-KEY - -### Name - -DROP ENCRYPTKEY - -### Description - -grammar: - -```sql -DROP ENCRYPTKEY key_name -```` - -Parameter Description: - -- `key_name`: The name of the key to delete, can include the name of the database. For example: `db1.my_key`. - -Delete a custom key. The name of the key is exactly the same to be deleted. - -Executing this command requires the user to have `ADMIN` privileges. - -### Example - -1. 
Delete a key - - ```sql - DROP ENCRYPTKEY my_key; - ```` - -### Keywords - - DROP, ENCRYPT, KEY - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-FILE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-FILE.md deleted file mode 100644 index 68f213de24b3f3..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-FILE.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -{ - "title": "DROP-FILE", - "language": "en" -} ---- - - - -## DROP-FILE - -### Name - -DROP FILE - -### Description - -This statement is used to delete an uploaded file. - -grammar: - -```sql -DROP FILE "file_name" [FROM database] -[properties] -```` - -illustrate: - -- file_name: file name. -- database: a db to which the file belongs, if not specified, the db of the current session is used. -- properties supports the following parameters: - - `catalog`: Required. The category the file belongs to. - -### Example - -1. Delete the file ca.pem - - ```sql - DROP FILE "ca.pem" properties("catalog" = "kafka"); - ```` - -### Keywords - - DROP, FILE - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-FUNCTION.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-FUNCTION.md deleted file mode 100644 index f27fe24187c213..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-FUNCTION.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "DROP-FUNCTION", - "language": "en" -} ---- - - - -## DROP-FUNCTION - -### Name - -DROP FUNCTION - -### Description - -Delete a custom function. Function names and parameter types are exactly the same to be deleted. 
- -grammar: - -```sql -DROP [GLOBAL] FUNCTION function_name - (arg_type [, ...]) -```` - -Parameter Description: - -- `function_name`: the name of the function to delete -- `arg_type`: the argument list of the function to delete - -### Example - -1. Delete a function - - ```sql - DROP FUNCTION my_add(INT, INT) - ```` -2. Delete a global function - - ```sql - DROP GLOBAL FUNCTION my_add(INT, INT) - ```` - -### Keywords - - DROP, FUNCTION - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-INDEX.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-INDEX.md deleted file mode 100644 index d4276a1e82bc21..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-INDEX.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "DROP-INDEX", - "language": "en" -} ---- - - - -## DROP-INDEX - -### Name - -DROP INDEX - -### Description - -This statement is used to delete the index of the specified name from a table. -grammar: - -```sql -DROP INDEX [IF EXISTS] index_name ON [db_name.]table_name; -```` - -### Example - -1. Delete the index - - ```sql - DROP INDEX [IF NOT EXISTS] index_name ON table1 ; - ```` - -### Keywords - - DROP, INDEX - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-JOB.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-JOB.md deleted file mode 100644 index ecbc1837ee938c..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-JOB.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ -"title": "DROP-JOB", -"language": "en" -} ---- - - - -## DROP-JOB - -### Name - -DROP JOB - -### Description - -User stops a JOB. A stopped job cannot be resumed. - -```sql -DROP JOB WHERE jobName = 'job_name'; -``` - -### Example - -1. DROP the job named test1. 
- - ```sql - DROP JOB where jobName = 'test1'; - ``` - -### Keywords - - DROP, JOB - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-MATERIALIZED-VIEW.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-MATERIALIZED-VIEW.md deleted file mode 100644 index 42960e36c872e5..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-MATERIALIZED-VIEW.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -{ - "title": "DROP-MATERIALIZED-VIEW", - "language": "en" -} ---- - - - -## DROP-MATERIALIZED-VIEW - -### Name - -DROP MATERIALIZED VIEW - -### Description - -This statement is used to drop a materialized view. Synchronous syntax - -grammar: - -```sql -DROP MATERIALIZED VIEW [IF EXISTS] mv_name ON table_name; -```` - - -1. IF EXISTS: - Do not throw an error if the materialized view does not exist. If this keyword is not declared, an error will be reported if the materialized view does not exist. - -2. mv_name: - The name of the materialized view to delete. Required. - -3. table_name: - The name of the table to which the materialized view to be deleted belongs. Required. 
- -### Example - -The table structure is - -```sql -mysql> desc all_type_table all; -+----------------+-------+----------+------+------ -+---------+-------+ -| IndexName | Field | Type | Null | Key | Default | Extra | -+----------------+-------+----------+------+------ -+---------+-------+ -| all_type_table | k1 | TINYINT | Yes | true | N/A | | -| | k2 | SMALLINT | Yes | false | N/A | NONE | -| | k3 | INT | Yes | false | N/A | NONE | -| | k4 | BIGINT | Yes | false | N/A | NONE | -| | k5 | LARGEINT | Yes | false | N/A | NONE | -| | k6 | FLOAT | Yes | false | N/A | NONE | -| | k7 | DOUBLE | Yes | false | N/A | NONE | -| | | | | | | | | -| k1_sumk2 | k1 | TINYINT | Yes | true | N/A | | -| | k2 | SMALLINT | Yes | false | N/A | SUM | -+----------------+-------+----------+------+------ -+---------+-------+ -```` - -1. Drop the materialized view named k1_sumk2 of the table all_type_table - - ```sql - drop materialized view k1_sumk2 on all_type_table; - ```` - - The table structure after the materialized view is deleted - - ````text - +----------------+-------+----------+------+------ -+---------+-------+ - | IndexName | Field | Type | Null | Key | Default | Extra | - +----------------+-------+----------+------+------ -+---------+-------+ - | all_type_table | k1 | TINYINT | Yes | true | N/A | | - | | k2 | SMALLINT | Yes | false | N/A | NONE | - | | k3 | INT | Yes | false | N/A | NONE | - | | k4 | BIGINT | Yes | false | N/A | NONE | - | | k5 | LARGEINT | Yes | false | N/A | NONE | - | | k6 | FLOAT | Yes | false | N/A | NONE | - | | k7 | DOUBLE | Yes | false | N/A | NONE | - +----------------+-------+----------+------+------ -+---------+-------+ - ```` - -2. Drop a non-existent materialized view in the table all_type_table - - ```sql - drop materialized view k1_k2 on all_type_table; - ERROR 1064 (HY000): errCode = 2, detailMessage = Materialized view [k1_k2] does not exist in table [all_type_table] - ```` - - The delete request reports an error directly - -3. 
Delete the materialized view k1_k2 in the table all_type_table, if it does not exist, no error will be reported. - - ```sql - drop materialized view if exists k1_k2 on all_type_table; - Query OK, 0 rows affected (0.00 sec) - ```` - - If it exists, delete it, if it does not exist, no error is reported. - -### Keywords - - DROP, MATERIALIZED, VIEW - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-POLICY.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-POLICY.md deleted file mode 100644 index 2de113d2e001e4..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-POLICY.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -{ - "title": "DROP-MATERIALIZED-VIEW", - "language": "en" -} ---- - - - -## DROP-POLICY - -### Name - -DROP POLICY - -### Description - -drop policy for row or storage - -#### ROW POLICY - -Grammar: - -1. Drop row policy -```sql -DROP ROW POLICY test_row_policy_1 on table1 [FOR user| ROLE role]; -``` - -2. Drop storage policy -```sql -DROP STORAGE POLICY policy_name1 -``` - -### Example - -1. Drop the row policy for table1 named test_row_policy_1 - - ```sql - DROP ROW POLICY test_row_policy_1 on table1 - ``` - -2. Drop the row policy for table1 using by user test - - ```sql - DROP ROW POLICY test_row_policy_1 on table1 for test - ``` - -3. Drop the row policy for table1 using by role1 - - ```sql - DROP ROW POLICY test_row_policy_1 on table1 for role role1 - ``` - -4. 
Drop the storage policy named policy_name1 -```sql -DROP STORAGE POLICY policy_name1 -``` - -### Keywords - - DROP, POLICY - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-RESOURCE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-RESOURCE.md deleted file mode 100644 index 5edcfe30186f72..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-RESOURCE.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "DROP-RESOURCE", - "language": "en" -} ---- - - - -## DROP-RESOURCE - -### Name - -DROP RESOURCE - -### Description - -This statement is used to delete an existing resource. Only the root or admin user can delete resources. -grammar: - -```sql -DROP RESOURCE 'resource_name' -```` - -Note: ODBC/S3 resources in use cannot be deleted. - -### Example - -1. Delete the Spark resource named spark0: - - ```sql - DROP RESOURCE 'spark0'; - ```` - -### Keywords - - DROP, RESOURCE - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-SQL-BLOCK-RULE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-SQL-BLOCK-RULE.md deleted file mode 100644 index 5180a95a469929..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-SQL-BLOCK-RULE.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -{ - "title": "DROP-DATABASE", - "language": "en" -} ---- - - - -## DROP-SQL-BLOCK-RULE - -### Name - -DROP SQL BLOCK RULE - -### Description - -Delete SQL blocking rules, support multiple rules, separated by , - -grammar: - -```sql -DROP SQL_BLOCK_RULE test_rule1,... -```` - -### Example - -1. 
Delete the test_rule1 and test_rule2 blocking rules - - ```sql - mysql> DROP SQL_BLOCK_RULE test_rule1,test_rule2; - Query OK, 0 rows affected (0.00 sec) - ```` - -### Keywords - -````text -DROP, SQL_BLOCK_RULE -```` - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-TABLE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-TABLE.md deleted file mode 100644 index 8bb21c8e6e1f87..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-TABLE.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "DROP-TABLE", - "language": "en" -} ---- - - - -## DROP-TABLE - -### Name - -DROP TABLE - -### Description - -This statement is used to drop a table. -grammar: - -```sql -DROP TABLE [IF EXISTS] [db_name.]table_name [FORCE]; -```` - - -illustrate: - -- After executing DROP TABLE for a period of time, the dropped table can be recovered through the RECOVER statement. See [RECOVER](../../../../sql-manual/sql-reference/Database-Administration-Statements/RECOVER.md) statement for details -- If you execute DROP TABLE FORCE, the system will not check whether there are unfinished transactions in the table, the table will be deleted directly and cannot be recovered, this operation is generally not recommended - -### Example - -1. Delete a table - - ```sql - DROP TABLE my_table; - ```` - -2. 
If it exists, delete the table of the specified database - - ```sql - DROP TABLE IF EXISTS example_db.my_table; - ```` - -### Keywords - - DROP, TABLE - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-WORKLOAD-GROUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-WORKLOAD-GROUP.md deleted file mode 100644 index e87ff941de577b..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/DROP-WORKLOAD-GROUP.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "DROP-WORKLOAD-GROUP", - "language": "en" -} ---- - - - -## DROP-WORKLOAD-GROUP - -### Name - -DROP WORKLOAD GROUP - - - -### Description - -This statement is used to delete a workload group. - -```sql -DROP WORKLOAD GROUP [IF EXISTS] 'rg_name' -``` - -### Example - -1. Delete the workload group named g1: - - ```sql - drop workload group if exists g1; - ``` - -### Keywords - - DROP, WORKLOAD, GROUP - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/TRUNCATE-TABLE.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/TRUNCATE-TABLE.md deleted file mode 100644 index c73e1ec87a73e7..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Drop/TRUNCATE-TABLE.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -{ - "title": "TRUNCATE-TABLE", - "language": "en" -} ---- - - - -## TRUNCATE-TABLE - -### Name - -TRUNCATE TABLE - -### Description - -This statement is used to clear the data of the specified table and partition -grammar: - -```sql -TRUNCATE TABLE [db.]tbl[ PARTITION(p1, p2, ...)]; -```` - -illustrate: - -- The statement clears the data, but leaves the table or partition. -- Unlike DELETE, this statement can only clear the specified table or partition as a whole, and cannot add filter conditions. -- Unlike DELETE, using this method to clear data will not affect query performance. 
-- The data deleted by this operation cannot be recovered. -- When using this command, the table status needs to be NORMAL, that is, operations such as SCHEMA CHANGE are not allowed. -- This command may cause the ongoing load to fail - -### Example - -1. Clear the table tbl under example_db - - ```sql - TRUNCATE TABLE example_db.tbl; - ```` - -2. Empty p1 and p2 partitions of table tbl - - ```sql - TRUNCATE TABLE tbl PARTITION(p1, p2); - ```` - -### Keywords - - TRUNCATE, TABLE - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/ALTER-ROUTINE-LOAD.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/ALTER-ROUTINE-LOAD.md deleted file mode 100644 index 574ff832b60478..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/ALTER-ROUTINE-LOAD.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -{ - "title": "ALTER-ROUTINE-LOAD", - "language": "en" -} ---- - - - -## ALTER-ROUTINE-LOAD - -### Name - -ALTER ROUTINE LOAD - -### Description - -This syntax is used to modify an already created routine import job. - -Only jobs in the PAUSED state can be modified. - -grammar: - -```sql -ALTER ROUTINE LOAD FOR [db.]job_name -[job_properties] -FROM data_source -[data_source_properties] -```` - -1. `[db.]job_name` - - Specifies the job name to modify. - -2. `tbl_name` - - Specifies the name of the table to be imported. - -3. `job_properties` - - Specifies the job parameters that need to be modified. Currently, only the modification of the following parameters is supported: - - 1. `desired_concurrent_number` - 2. `max_error_number` - 3. `max_batch_interval` - 4. `max_batch_rows` - 5. `max_batch_size` - 6. `jsonpaths` - 7. `json_root` - 8. `strip_outer_array` - 9. `strict_mode` - 10. `timezone` - 11. `num_as_string` - 12. `fuzzy_parse` - 13. `partial_columns` - 14. `max_filter_ratio` - - -4. `data_source` - - The type of data source. Currently supports: - - KAFKA - -5. 
`data_source_properties` - - Relevant properties of the data source. Currently only supports: - - 1. `kafka_partitions` - 2. `kafka_offsets` - 3. `kafka_broker_list` - 4. `kafka_topic` - 5. Custom properties, such as `property.group.id` - - Note: - - 1. `kafka_partitions` and `kafka_offsets` are used to modify the offset of the kafka partition to be consumed, only the currently consumed partition can be modified. Cannot add partition. - -### Example - -1. Change `desired_concurrent_number` to 1 - - ```sql - ALTER ROUTINE LOAD FOR db1.label1 - PROPERTIES - ( - "desired_concurrent_number" = "1" - ); - ```` - -2. Modify `desired_concurrent_number` to 10, modify the offset of the partition, and modify the group id. - - ```sql - ALTER ROUTINE LOAD FOR db1.label1 - PROPERTIES - ( - "desired_concurrent_number" = "10" - ) - FROM kafka - ( - "kafka_partitions" = "0, 1, 2", - "kafka_offsets" = "100, 200, 100", - "property.group.id" = "new_group" - ); - ```` - -### Keywords - - ALTER, ROUTINE, LOAD - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/BROKER-LOAD.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/BROKER-LOAD.md deleted file mode 100644 index 63a58a99574a6b..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/BROKER-LOAD.md +++ /dev/null @@ -1,572 +0,0 @@ ---- -{ - "title": "BROKER-LOAD", - "language": "en" -} ---- - - - -## BROKER-LOAD - -### Name - -BROKER LOAD - -### Description - -This command is mainly used to import data on remote storage (such as S3, HDFS) through the Broker service process. - -```sql -LOAD LABEL load_label -( -data_desc1[, data_desc2, ...] -) -WITH BROKER broker_name -[broker_properties] -[load_properties] -[COMMENT "comment"]; -``` - -- `load_label` - - Each import needs to specify a unique Label. You can use this label to view the progress of the job later. 
- - `[database.]label_name` - -- `data_desc1` - - Used to describe a set of files that need to be imported. - - ```sql - [MERGE|APPEND|DELETE] - DATA INFILE - ( - "file_path1"[, file_path2, ...] - ) - [NEGATIVE] - INTO TABLE `table_name` - [PARTITION (p1, p2, ...)] - [COLUMNS TERMINATED BY "column_separator"] - [LINES TERMINATED BY "line_delimiter"] - [FORMAT AS "file_type"] - [COMPRESS_TYPE AS "compress_type"] - [(column_list)] - [COLUMNS FROM PATH AS (c1, c2, ...)] - [SET (column_mapping)] - [PRECEDING FILTER predicate] - [WHERE predicate] - [DELETE ON expr] - [ORDER BY source_sequence] - [PROPERTIES ("key1"="value1", ...)] - ```` - - - `[MERGE|APPEND|DELETE]` - - Data merge type. The default is APPEND, indicating that this import is a normal append write operation. The MERGE and DELETE types are only available for Unique Key model tables. The MERGE type needs to be used with the `[DELETE ON]` statement to mark the Delete Flag column. The DELETE type indicates that all data imported this time are deleted data. - - - `DATA INFILE` - - Specify the file path to be imported. Can be multiple. Wildcards can be used. The path must eventually match to a file, if it only matches a directory the import will fail. - - - `NEGATIVE` - - This keyword is used to indicate that this import is a batch of "negative" imports. This method is only for aggregate data tables with integer SUM aggregate type. This method will reverse the integer value corresponding to the SUM aggregate column in the imported data. Mainly used to offset previously imported wrong data. - - - `PARTITION(p1, p2, ...)` - - You can specify to import only certain partitions of the table. Data that is no longer in the partition range will be ignored. - - - `COLUMNS TERMINATED BY` - - Specifies the column separator. Only valid in CSV format. Only single-byte delimiters can be specified. - - - `LINES TERMINATED BY` - - Specifies the line delimiter. Only valid in CSV format. 
Only single-byte delimiters can be specified. - - - `FORMAT AS` - - Specifies the file type, CSV, PARQUET and ORC formats are supported. Default is CSV. - - - `COMPRESS_TYPE AS` - Specifies the file compress type, GZ/LZO/BZ2/LZ4FRAME/DEFLATE/LZOP. Only valid in CSV or JSON format. - - - `column list` - - Used to specify the column order in the original file. For a detailed introduction to this part, please refer to the [Column Mapping, Conversion and Filtering](../../../../data-operate/import/import-scenes/load-data-convert.md) document. - - `(k1, k2, tmpk1)` - - - `COLUMNS FROM PATH AS` - - Specifies the columns to extract from the import file path. - - - `SET (column_mapping)` - - Specifies the conversion function for the column. - - - `PRECEDING FILTER predicate` - - Pre-filter conditions. The data is first concatenated into raw data rows in order according to `column list` and `COLUMNS FROM PATH AS`. Then filter according to the pre-filter conditions. For a detailed introduction to this part, please refer to the [Column Mapping, Conversion and Filtering](../../../../data-operate/import/import-scenes/load-data-convert.md) document. - - - `WHERE predicate` - - Filter imported data based on conditions. For a detailed introduction to this part, please refer to the [Column Mapping, Conversion and Filtering](../../../../data-operate/import/import-scenes/load-data-convert.md) document. - - - `DELETE ON expr` - - It needs to be used with the MEREGE import mode, only for the table of the Unique Key model. Used to specify the columns and calculated relationships in the imported data that represent the Delete Flag. - - - `ORDER BY` - - Tables only for the Unique Key model. Used to specify the column in the imported data that represents the Sequence Col. Mainly used to ensure data order when importing. - - - `PROPERTIES ("key1"="value1", ...)` - - Specify some parameters of the imported format. 
For example, if the imported file is in `json` format, you can specify parameters such as `json_root`, `jsonpaths`, `fuzzy parse`, etc. - - - enclose - - When the csv data field contains row delimiters or column delimiters, to prevent accidental truncation, single-byte characters can be specified as brackets for protection. For example, the column separator is ",", the bracket is "'", and the data is "a,'b,c'", then "b,c" will be parsed as a field. - - - escape - - Used to escape characters that appear in a csv field identical to the enclosing characters. For example, if the data is "a,'b,'c'", enclose is "'", and you want "b,'c to be parsed as a field, you need to specify a single-byte escape character, such as "\", and then modify the data to "a,' b,\'c'". - -- `WITH BROKER broker_name` - - Specify the Broker service name to be used. In the public cloud Doris. Broker service name is `bos` - -- `broker_properties` - - Specifies the information required by the broker. This information is usually used by the broker to be able to access remote storage systems. Such as BOS or HDFS. See the [Broker](../../../../advanced/broker.md) documentation for specific information. - - ````text - ( - "key1" = "val1", - "key2" = "val2", - ... - ) - ```` - -- `load_properties` - - Specifies import-related parameters. The following parameters are currently supported: - - - `timeout` - - Import timeout. The default is 4 hours. in seconds. - - - `max_filter_ratio` - - The maximum tolerable proportion of data that can be filtered (for reasons such as data irregularity). Zero tolerance by default. The value range is 0 to 1. - - - `exec_mem_limit` - - Import memory limit. Default is 2GB. The unit is bytes. - - - `strict_mode` - - Whether to impose strict restrictions on data. Defaults to false. 
- - - `partial_columns` - - Boolean type, True means that use partial column update, the default value is false, this parameter is only allowed to be set when the table model is Unique and Merge on Write is used. - - - `timezone` - - Specify the time zone for some functions that are affected by time zones, such as `strftime/alignment_timestamp/from_unixtime`, etc. Please refer to the [timezone](../../../../advanced/time-zone.md) documentation for details. If not specified, the "Asia/Shanghai" timezone is used - - - `load_parallelism` - - It allows the user to set the parallelism of the load execution plan - on a single node when the broker load is submitted, default value is 1. - - - `send_batch_parallelism` - - Used to set the default parallelism for sending batch, if the value for parallelism exceed `max_send_batch_parallelism_per_job` in BE config, then the coordinator BE will use the value of `max_send_batch_parallelism_per_job`. - - - `load_to_single_tablet` - - Boolean type, True means that one task can only load data to one tablet in the corresponding partition at a time. The default value is false. The number of tasks for the job depends on the overall concurrency. This parameter can only be set when loading data into the OLAP table with random bucketing. - - - priority - - Set the priority of the load job, there are three options: `HIGH/NORMAL/LOW`, use `NORMAL` priority as default. The pending broker load jobs which have higher priority will be chosen to execute earlier. - -- comment - - Specify the comment for the import job. The comment can be viewed in the `show load` statement. - -### Example - -1. 
Import a batch of data from HDFS - - ```sql - LOAD LABEL example_db.label1 - ( - DATA INFILE("hdfs://hdfs_host:hdfs_port/input/file.txt") - INTO TABLE `my_table` - COLUMNS TERMINATED BY "," - ) - WITH BROKER hdfs - ( - "username"="hdfs_user", - "password"="hdfs_password" - ); - ```` - - Import the file `file.txt`, separated by commas, into the table `my_table`. - -2. Import data from HDFS, using wildcards to match two batches of files in two batches. into two tables separately. - - ```sql - LOAD LABEL example_db.label2 - ( - DATA INFILE("hdfs://hdfs_host:hdfs_port/input/file-10*") - INTO TABLE `my_table1` - PARTITION (p1) - COLUMNS TERMINATED BY "," - (k1, tmp_k2, tmp_k3) - SET ( - k2 = tmp_k2 + 1, - k3 = tmp_k3 + 1 - ) - DATA INFILE("hdfs://hdfs_host:hdfs_port/input/file-20*") - INTO TABLE `my_table2` - COLUMNS TERMINATED BY "," - (k1, k2, k3) - ) - WITH BROKER hdfs - ( - "username"="hdfs_user", - "password"="hdfs_password" - ); - ```` - - Import two batches of files `file-10*` and `file-20*` using wildcard matching. Imported into two tables `my_table1` and `my_table2` respectively. Where `my_table1` specifies to import into partition `p1`, and will import the values of the second and third columns in the source file +1. - -3. Import a batch of data from HDFS. 
- - ```sql - LOAD LABEL example_db.label3 - ( - DATA INFILE("hdfs://hdfs_host:hdfs_port/user/doris/data/*/*") - INTO TABLE `my_table` - COLUMNS TERMINATED BY "\\x01" - ) - WITH BROKER my_hdfs_broker - ( - "username" = "", - "password" = "", - "fs.defaultFS" = "hdfs://my_ha", - "dfs.nameservices" = "my_ha", - "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2", - "dfs.namenode.rpc-address.my_ha.my_namenode1" = "nn1_host:rpc_port", - "dfs.namenode.rpc-address.my_ha.my_namenode2" = "nn2_host:rpc_port", - "dfs.client.failover.proxy.provider" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" - ); - ```` - - Specify the delimiter as Hive's default delimiter `\\x01`, and use the wildcard * to specify all files in all directories under the `data` directory. Use simple authentication while configuring namenode HA. - -4. Import data in Parquet format and specify FORMAT as parquet. The default is to judge by the file suffix - - ```sql - LOAD LABEL example_db.label4 - ( - DATA INFILE("hdfs://hdfs_host:hdfs_port/input/file") - INTO TABLE `my_table` - FORMAT AS "parquet" - (k1, k2, k3) - ) - WITH BROKER hdfs - ( - "username"="hdfs_user", - "password"="hdfs_password" - ); - ```` - -5. Import the data and extract the partition field in the file path - - ```sql - LOAD LABEL example_db.label10 - ( - DATA INFILE("hdfs://hdfs_host:hdfs_port/input/city=beijing/*/*") - INTO TABLE `my_table` - FORMAT AS "csv" - (k1, k2, k3) - COLUMNS FROM PATH AS (city, utc_date) - ) - WITH BROKER hdfs - ( - "username"="hdfs_user", - "password"="hdfs_password" - ); - ```` - - The columns in the `my_table` table are `k1, k2, k3, city, utc_date`. 
- - The `hdfs://hdfs_host:hdfs_port/user/doris/data/input/dir/city=beijing` directory includes the following files: - - ````text - hdfs://hdfs_host:hdfs_port/input/city=beijing/utc_date=2020-10-01/0000.csv - hdfs://hdfs_host:hdfs_port/input/city=beijing/utc_date=2020-10-02/0000.csv - hdfs://hdfs_host:hdfs_port/input/city=tianji/utc_date=2020-10-03/0000.csv - hdfs://hdfs_host:hdfs_port/input/city=tianji/utc_date=2020-10-04/0000.csv - ```` - - The file only contains three columns of `k1, k2, k3`, and the two columns of `city, utc_date` will be extracted from the file path. - -6. Filter the data to be imported. - - ```sql - LOAD LABEL example_db.label6 - ( - DATA INFILE("hdfs://host:port/input/file") - INTO TABLE `my_table` - (k1, k2, k3) - SET ( - k2 = k2 + 1 - ) - PRECEDING FILTER k1 = 1 - WHERE k1 > k2 - ) - WITH BROKER hdfs - ( - "username"="user", - "password"="pass" - ); - ```` - - Only in the original data, k1 = 1, and after transformation, rows with k1 > k2 will be imported. - -7. Import data, extract the time partition field in the file path, and the time contains %3A (in the hdfs path, ':' is not allowed, all ':' will be replaced by %3A) - - ```sql - LOAD LABEL example_db.label7 - ( - DATA INFILE("hdfs://host:port/user/data/*/test.txt") - INTO TABLE `tbl12` - COLUMNS TERMINATED BY "," - (k2,k3) - COLUMNS FROM PATH AS (data_time) - SET ( - data_time=str_to_date(data_time, '%Y-%m-%d %H%%3A%i%%3A%s') - ) - ) - WITH BROKER hdfs - ( - "username"="user", - "password"="pass" - ); - ```` - - There are the following files in the path: - - ````text - /user/data/data_time=2020-02-17 00%3A00%3A00/test.txt - /user/data/data_time=2020-02-18 00%3A00%3A00/test.txt - ```` - - The table structure is: - - ````text - data_time DATETIME, - k2 INT, - k3 INT - ```` - -8. Import a batch of data from HDFS, specify the timeout and filter ratio. Broker with clear text my_hdfs_broker. Simple authentication. 
And delete the columns in the original data that match the columns with v2 greater than 100 in the imported data, and other columns are imported normally - - ```sql - LOAD LABEL example_db.label8 - ( - MERGE DATA INFILE("HDFS://test:802/input/file") - INTO TABLE `my_table` - (k1, k2, k3, v2, v1) - DELETE ON v2 > 100 - ) - WITH HDFS - ( - "hadoop.username"="user", - "password"="pass" - ) - PROPERTIES - ( - "timeout" = "3600", - "max_filter_ratio" = "0.1" - ); - ```` - - Import using the MERGE method. `my_table` must be a table with Unique Key. When the value of the v2 column in the imported data is greater than 100, the row is considered a delete row. - - The import task timeout is 3600 seconds, and the error rate is allowed to be within 10%. - -9. Specify the source_sequence column when importing to ensure the replacement order in the UNIQUE_KEYS table: - - ```sql - LOAD LABEL example_db.label9 - ( - DATA INFILE("HDFS://test:802/input/file") - INTO TABLE `my_table` - COLUMNS TERMINATED BY "," - (k1,k2,source_sequence,v1,v2) - ORDER BY source_sequence - ) - WITH HDFS - ( - "hadoop.username"="user", - "password"="pass" - ) - ```` - - `my_table` must be an Unique Key model table with Sequence Col specified. The data will be ordered according to the value of the `source_sequence` column in the source data. - -10. Import a batch of data from HDFS, specify the file format as `json`, and specify parameters of `json_root` and `jsonpaths`. 
- - ```sql - LOAD LABEL example_db.label10 - ( - DATA INFILE("HDFS://test:port/input/file.json") - INTO TABLE `my_table` - FORMAT AS "json" - PROPERTIES( - "json_root" = "$.item", - "jsonpaths" = "[$.id, $.city, $.code]" - ) - ) - with HDFS ( - "hadoop.username" = "user" - "password" = "" - ) - PROPERTIES - ( - "timeout"="1200", - "max_filter_ratio"="0.1" - ); - ``` - - `jsonpaths` can be use with `column list` and `SET(column_mapping)`: - - ```sql - LOAD LABEL example_db.label10 - ( - DATA INFILE("HDFS://test:port/input/file.json") - INTO TABLE `my_table` - FORMAT AS "json" - (id, code, city) - SET (id = id * 10) - PROPERTIES( - "json_root" = "$.item", - "jsonpaths" = "[$.id, $.code, $.city]" - ) - ) - with HDFS ( - "hadoop.username" = "user" - "password" = "" - ) - PROPERTIES - ( - "timeout"="1200", - "max_filter_ratio"="0.1" - ); - ``` - -11. Load data in csv format from cos(Tencent Cloud Object Storage). - - ```SQL - LOAD LABEL example_db.label10 - ( - DATA INFILE("cosn://my_bucket/input/file.csv") - INTO TABLE `my_table` - (k1, k2, k3) - ) - WITH BROKER "broker_name" - ( - "fs.cosn.userinfo.secretId" = "xxx", - "fs.cosn.userinfo.secretKey" = "xxxx", - "fs.cosn.bucket.endpoint_suffix" = "cos.xxxxxxxxx.myqcloud.com" - ) - ``` - -12. Load CSV date and trim double quotes and skip first 5 lines - - ```SQL - LOAD LABEL example_db.label12 - ( - DATA INFILE("cosn://my_bucket/input/file.csv") - INTO TABLE `my_table` - (k1, k2, k3) - PROPERTIES("trim_double_quotes" = "true", "skip_lines" = "5") - ) - WITH BROKER "broker_name" - ( - "fs.cosn.userinfo.secretId" = "xxx", - "fs.cosn.userinfo.secretKey" = "xxxx", - "fs.cosn.bucket.endpoint_suffix" = "cos.xxxxxxxxx.myqcloud.com" - ) - ``` - -### Keywords - - BROKER, LOAD - -### Best Practice - -1. Check the import task status - - Broker Load is an asynchronous import process. 
The successful execution of the statement only means that the import task is submitted successfully, and does not mean that the data import is successful. The import status needs to be viewed through the [SHOW LOAD](../../Show-Statements/SHOW-LOAD.md) command. - -2. Cancel the import task - - Import tasks that have been submitted but not yet completed can be canceled by the [CANCEL LOAD](./CANCEL-LOAD.md) command. After cancellation, the written data will also be rolled back and will not take effect. - -3. Label, import transaction, multi-table atomicity - - All import tasks in Doris are atomic. And the import of multiple tables in the same import task can also guarantee atomicity. At the same time, Doris can also use the Label mechanism to ensure that the data imported is not lost or heavy. For details, see the [Import Transactions and Atomicity](../../../../data-operate/import/import-scenes/load-atomicity.md) documentation. - -4. Column mapping, derived columns and filtering - - Doris can support very rich column transformation and filtering operations in import statements. Most built-in functions and UDFs are supported. For how to use this function correctly, please refer to the [Column Mapping, Conversion and Filtering](../../../../data-operate/import/import-scenes/load-data-convert.md) document. - -5. Error data filtering - - Doris' import tasks can tolerate a portion of malformed data. Tolerated via `max_filter_ratio` setting. The default is 0, which means that the entire import task will fail when there is an error data. If the user wants to ignore some problematic data rows, the secondary parameter can be set to a value between 0 and 1, and Doris will automatically skip the rows with incorrect data format. - - For some calculation methods of the tolerance rate, please refer to the [Column Mapping, Conversion and Filtering](../../../../data-operate/import/import-scenes/load-data-convert.md) document. - -6. 
Strict Mode - - The `strict_mode` attribute is used to set whether the import task runs in strict mode. The format affects the results of column mapping, transformation, and filtering. For a detailed description of strict mode, see the [strict mode](../../../../data-operate/import/import-scenes/load-strict-mode.md) documentation. - -7. Timeout - - The default timeout for Broker Load is 4 hours. from the time the task is submitted. If it does not complete within the timeout period, the task fails. - -8. Limits on data volume and number of tasks - - Broker Load is suitable for importing data within 100GB in one import task. Although theoretically there is no upper limit on the amount of data imported in one import task. But committing an import that is too large results in a longer run time, and the cost of retrying after a failure increases. - - At the same time, limited by the size of the cluster, we limit the maximum amount of imported data to the number of ComputeNode nodes * 3GB. In order to ensure the rational use of system resources. If there is a large amount of data to be imported, it is recommended to divide it into multiple import tasks. - - Doris also limits the number of import tasks running simultaneously in the cluster, usually ranging from 3 to 10. Import jobs submitted after that are queued. The maximum queue length is 100. Subsequent submissions will be rejected outright. Note that the queue time is also calculated into the total job time. If it times out, the job is canceled. Therefore, it is recommended to reasonably control the frequency of job submission by monitoring the running status of the job. 
- diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CANCEL-LOAD.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CANCEL-LOAD.md deleted file mode 100644 index b96797babeee38..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CANCEL-LOAD.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -{ - "title": "CANCEL-LOAD", - "language": "en" -} ---- - - - -## CANCEL-LOAD - -### Name - -CANCEL LOAD - -### Description - -This statement is used to undo an import job for the specified label. Or batch undo import jobs via fuzzy matching - -```sql -CANCEL LOAD -[FROM db_name] -WHERE [LABEL = "load_label" | LABEL like "label_pattern" | STATE = "PENDING/ETL/LOADING"] -``` - -Notice: Cancel by State is supported since 1.2.0. - -### Example - -1. Cancel the import job whose label is `example_db_test_load_label` on the database example_db - - ```sql - CANCEL LOAD - FROM example_db - WHERE LABEL = "example_db_test_load_label"; - ```` - -2. Cancel all import jobs containing example* on the database example*db. - - ```sql - CANCEL LOAD - FROM example_db - WHERE LABEL like "example_"; - ```` - - - -3. Cancel all import jobs which state are "LOADING" - - ```sql - CANCEL LOAD - FROM example_db - WHERE STATE = "loading"; - ``` - - - -### Keywords - - CANCEL, LOAD - -### Best Practice - -1. Only pending import jobs in PENDING, ETL, LOADING state can be canceled. -2. When performing batch undo, Doris does not guarantee the atomic undo of all corresponding import jobs. That is, it is possible that only some of the import jobs were successfully undone. The user can view the job status through the SHOW LOAD statement and try to execute the CANCEL LOAD statement repeatedly. 
- diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CLEAN-LABEL.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CLEAN-LABEL.md deleted file mode 100644 index 8f7abe201e136f..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CLEAN-LABEL.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -{ - "title": "CLEAN-LABEL", - "language": "en" -} ---- - - - -## CLEAN-LABEL - -### Name - -CLEAN LABEL - -### Description - -For manual cleanup of historical load jobs. After cleaning, the Label can be reused. - -Syntax: - -```sql -CLEAN LABEL [label] FROM db; -``` - -### Example - -1. Clean label label1 from database db1 - - ```sql - CLEAN LABEL label1 FROM db1; - ``` - -2. Clean all labels from database db1 - - ```sql - CLEAN LABEL FROM db1; - ``` - -### Keywords - - CLEAN, LABEL - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CLEAN-PROFILE.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CLEAN-PROFILE.md deleted file mode 100644 index e6c178fd4d3f78..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CLEAN-PROFILE.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "CLEAN-PROFILE", - "language": "en" -} ---- - - - -## CLEAN-PROFILE - -### Name - -CLEAN PROFILE - -### Description - -For manual cleanup all of historical query or load profile. - -Syntax: - -```sql -CLEAN ALL PROFILE; -``` - -### Example - -1. 
Clean all profile - - ```sql - CLEAN ALL PROFILE; - ``` - -### Keywords - - CLEAN, PROFILE - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CREATE-ROUTINE-LOAD.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CREATE-ROUTINE-LOAD.md deleted file mode 100644 index a42459a022bea8..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CREATE-ROUTINE-LOAD.md +++ /dev/null @@ -1,626 +0,0 @@ ---- -{ - "title": "CREATE-ROUTINE-LOAD", - "language": "en" -} ---- - - - -## CREATE-ROUTINE-LOAD - -### Name - -CREATE ROUTINE LOAD - -### Description - -The Routine Load function allows users to submit a resident import task, and import data into Doris by continuously reading data from a specified data source. - -Currently, only data in CSV or Json format can be imported from Kakfa through unauthenticated or SSL authentication. [Example of importing data in Json format](../../../../data-operate/import/import-way/routine-load-manual.md#Example_of_importing_data_in_Json_format) - -grammar: - -```sql -CREATE ROUTINE LOAD [db.]job_name [ON tbl_name] -[merge_type] -[load_properties] -[job_properties] -FROM data_source [data_source_properties] -[COMMENT "comment"] -``` - -- `[db.]job_name` - - The name of the import job. Within the same database, only one job with the same name can be running. - -- `tbl_name` - - Specifies the name of the table to be imported.Optional parameter, If not specified, the dynamic table method will - be used, which requires the data in Kafka to contain table name information. Currently, only the table name can be - obtained from the Kafka value, and it needs to conform to the format of "table_name|{"col1": "val1", "col2": "val2"}" - for JSON data. The "tbl_name" represents the table name, and "|" is used as the delimiter between the table name and - the table data. 
The same format applies to CSV data, such as "table_name|val1,val2,val3". It is important to note that - the "table_name" must be consistent with the table name in Doris, otherwise it may cause import failures. - - Tips: The `columns_mapping` parameter is not supported for dynamic tables. If your table structure is consistent with - the table structure in Doris and there is a large amount of table information to be imported, this method will be the - best choice. - -- `merge_type` - - Data merge type. The default is APPEND, which means that the imported data are ordinary append write operations. The MERGE and DELETE types are only available for Unique Key model tables. The MERGE type needs to be used with the [DELETE ON] statement to mark the Delete Flag column. The DELETE type means that all imported data are deleted data. - - Tips: When using dynamic multiple tables, please note that this parameter should be consistent with the type of each dynamic table, otherwise it will result in import failure. - -- load_properties - - Used to describe imported data. The composition is as follows: - - ````SQL - [column_separator], - [columns_mapping], - [preceding_filter], - [where_predicates], - [partitions], - [DELETE ON], - [ORDER BY] - ```` - - - `column_separator` - - Specifies the column separator, defaults to `\t` - - `COLUMNS TERMINATED BY ","` - - - `columns_mapping` - - It is used to specify the mapping relationship between file columns and columns in the table, as well as various column transformations. For a detailed introduction to this part, you can refer to the [Column Mapping, Transformation and Filtering] document. - - `(k1, k2, tmpk1, k3 = tmpk1 + 1)` - - Tips: Dynamic multiple tables are not supported. - - - `preceding_filter` - - Filter raw data. For a detailed introduction to this part, you can refer to the [Column Mapping, Transformation and Filtering] document. - - Tips: Dynamic multiple tables are not supported. 
- - - `where_predicates` - - Filter imported data based on conditions. For a detailed introduction to this part, you can refer to the [Column Mapping, Transformation and Filtering] document. - - `WHERE k1 > 100 and k2 = 1000` - - Tips: When using dynamic multiple tables, please note that this parameter should be consistent with the type of each dynamic table, otherwise it will result in import failure. - - - `partitions` - - Specify in which partitions of the import destination table. If not specified, it will be automatically imported into the corresponding partition. - - `PARTITION(p1, p2, p3)` - - Tips: When using dynamic multiple tables, please note that this parameter should conform to each dynamic table, otherwise it may cause import failure. - - - `DELETE ON` - - It needs to be used with the MEREGE import mode, only for the table of the Unique Key model. Used to specify the columns and calculated relationships in the imported data that represent the Delete Flag. - - `DELETE ON v3 >100` - - Tips: When using dynamic multiple tables, please note that this parameter should conform to each dynamic table, otherwise it may cause import failure. - - - `ORDER BY` - - Tables only for the Unique Key model. Used to specify the column in the imported data that represents the Sequence Col. Mainly used to ensure data order when importing. - - Tips: When using dynamic multiple tables, please note that this parameter should conform to each dynamic table, otherwise it may cause import failure. - -- `job_properties` - - Common parameters for specifying routine import jobs. - - ````text - PROPERTIES ( - "key1" = "val1", - "key2" = "val2" - ) - ```` - - Currently we support the following parameters: - - 1. `desired_concurrent_number` - - Desired concurrency. A routine import job will be divided into multiple subtasks for execution. This parameter specifies the maximum number of tasks a job can execute concurrently. Must be greater than 0. Default is 5. 
- - This degree of concurrency is not the actual degree of concurrency. The actual degree of concurrency will be comprehensively considered by the number of nodes in the cluster, the load situation, and the situation of the data source. - - `"desired_concurrent_number" = "3"` - - 2. `max_batch_interval/max_batch_rows/max_batch_size` - - These three parameters represent: - - 1. The maximum execution time of each subtask, in seconds. Must be greater than or equal to 1. The default is 10. - 2. The maximum number of lines read by each subtask. Must be greater than or equal to 200000. The default is 200000. - 3. The maximum number of bytes read by each subtask. The unit is bytes and the range is 100MB to 10GB. The default is 100MB. - - These three parameters are used to control the execution time and processing volume of a subtask. When either one reaches the threshold, the task ends. - - ````text - "max_batch_interval" = "20", - "max_batch_rows" = "300000", - "max_batch_size" = "209715200" - ```` - - 3. `max_error_number` - - The maximum number of error lines allowed within the sampling window. Must be greater than or equal to 0. The default is 0, which means no error lines are allowed. - - The sampling window is `max_batch_rows * 10`. That is, if the number of error lines is greater than `max_error_number` within the sampling window, the routine operation will be suspended, requiring manual intervention to check data quality problems. - - Rows that are filtered out by where conditions are not considered error rows. - - 4. `strict_mode` - - Whether to enable strict mode, the default is off. If enabled, the column type conversion of non-null raw data will be filtered if the result is NULL. Specify as: - - `"strict_mode" = "true"` - - The strict mode mode means strict filtering of column type conversions during the load process. The strict filtering strategy is as follows: - - 1. For column type conversion, if strict mode is true, the wrong data will be filtered. 
The error data here refers to the fact that the original data is not null, and the result is a null value after participating in the column type conversion. - 2. When a loaded column is generated by a function transformation, strict mode has no effect on it. - 3. For a column type loaded with a range limit, if the original data can pass the type conversion normally, but cannot pass the range limit, strict mode will not affect it. For example, if the type is decimal(1,0) and the original data is 10, it is eligible for type conversion but not for column declarations. This data strict has no effect on it. - - **strict mode and load relationship of source data** - - Here is an example of a column type of TinyInt. - - > Note: When a column in a table allows a null value to be loaded - - | source data | source data example | string to int | strict_mode | result | - | ----------- | ------------------- | ------------- | ------------- | ---------------------- | - | null | \N | N/A | true or false | NULL | - | not null | aaa or 2000 | NULL | true | invalid data(filtered) | - | not null | aaa | NULL | false | NULL | - | not null | 1 | 1 | true or false | correct data | - - Here the column type is Decimal(1,0) - - > Note: When a column in a table allows a null value to be loaded - - | source data | source data example | string to int | strict_mode | result | - | ----------- | ------------------- | ------------- | ------------- | ---------------------- | - | null | \N | N/A | true or false | NULL | - | not null | aaa | NULL | true | invalid data(filtered) | - | not null | aaa | NULL | false | NULL | - | not null | 1 or 10 | 1 | true or false | correct data | - - > Note: 10 Although it is a value that is out of range, because its type meets the requirements of decimal, strict mode has no effect on it. 10 will eventually be filtered in other ETL processing flows. But it will not be filtered by strict mode. - - 5. `timezone` - - Specifies the time zone used by the import job. 
The default is to use the Session's timezone parameter. This parameter affects the results of all time zone-related functions involved in the import. - - 6. `format` - - Specify the import data format, the default is csv, and the json format is supported. - - 7. `jsonpaths` - - When the imported data format is json, the fields in the Json data can be extracted by specifying jsonpaths. - - `-H "jsonpaths: [\"$.k2\", \"$.k1\"]"` - - 8. `strip_outer_array` - - When the imported data format is json, strip_outer_array is true, indicating that the Json data is displayed in the form of an array, and each element in the data will be regarded as a row of data. The default value is false. - - `-H "strip_outer_array: true"` - - 9. `json_root` - - When the import data format is json, you can specify the root node of the Json data through json_root. Doris will extract the elements of the root node through json_root for parsing. Default is empty. - - `-H "json_root: $.RECORDS"` - 10. `send_batch_parallelism` - - Integer, Used to set the default parallelism for sending batch, if the value for parallelism exceed `max_send_batch_parallelism_per_job` in BE config, then the coordinator BE will use the value of `max_send_batch_parallelism_per_job`. - - 11. `load_to_single_tablet` - Boolean type, True means that one task can only load data to one tablet in the corresponding partition at a time. The default value is false. This parameter can only be set when loading data into the OLAP table with random bucketing. - - 12. `partial_columns` - Boolean type, True means that use partial column update, the default value is false, this parameter is only allowed to be set when the table model is Unique and Merge on Write is used. Multi-table does not support this parameter. - - 13. `max_filter_ratio` - The maximum allowed filtering rate within the sampling window. Must be between 0 and 1. The default value is 0. - - The sampling window is `max_batch_rows * 10`. 
That is, if the number of error lines / total lines is greater than `max_filter_ratio` within the sampling window, the routine operation will be suspended, requiring manual intervention to check data quality problems. - - Rows that are filtered out by where conditions are not considered error rows. - - 14. `enclose` - When the csv data field contains row delimiters or column delimiters, to prevent accidental truncation, single-byte characters can be specified as brackets for protection. For example, the column separator is ",", the bracket is "'", and the data is "a,'b,c'", then "b,c" will be parsed as a field. - - 15. `escape` - Used to escape characters that appear in a csv field identical to the enclosing characters. For example, if the data is "a,'b,'c'", enclose is "'", and you want "b,'c to be parsed as a field, you need to specify a single-byte escape character, such as "\", and then modify the data to "a,' b,\'c'". - -- `FROM data_source [data_source_properties]` - - The type of data source. Currently supports: - - ````text - FROM KAFKA - ( - "key1" = "val1", - "key2" = "val2" - ) - ```` - - `data_source_properties` supports the following data source properties: - - 1. `kafka_broker_list` - - Kafka's broker connection information. The format is ip:host. Separate multiple brokers with commas. - - `"kafka_broker_list" = "broker1:9092,broker2:9092"` - - 2. `kafka_topic` - - Specifies the Kafka topic to subscribe to. - - `"kafka_topic" = "my_topic"` - - 3. `kafka_partitions/kafka_offsets` - - Specify the kafka partition to be subscribed to, and the corresponding starting offset of each partition. If a time is specified, consumption will start at the nearest offset greater than or equal to the time. - - offset can specify a specific offset from 0 or greater, or: - - - `OFFSET_BEGINNING`: Start subscription from where there is data. - - `OFFSET_END`: subscribe from the end. 
- - Time format, such as: "2021-05-22 11:00:00" - - If not specified, all partitions under topic will be subscribed from `OFFSET_END` by default. - - ````text - "kafka_partitions" = "0,1,2,3", - "kafka_offsets" = "101,0,OFFSET_BEGINNING,OFFSET_END" - ```` - - ````text - "kafka_partitions" = "0,1,2,3", - "kafka_offsets" = "2021-05-22 11:00:00,2021-05-22 11:00:00,2021-05-22 11:00:00" - ```` - - Note that the time format cannot be mixed with the OFFSET format. - - 4. `property` - - Specify custom kafka parameters. The function is equivalent to the "--property" parameter in the kafka shell. - - When the value of the parameter is a file, you need to add the keyword: "FILE:" before the value. - - For how to create a file, please refer to the [CREATE FILE](../../../Data-Definition-Statements/Create/CREATE-FILE) command documentation. - - For more supported custom parameters, please refer to the configuration items on the client side in the official CONFIGURATION document of librdkafka. Such as: - - ````text - "property.client.id" = "12345", - "property.ssl.ca.location" = "FILE:ca.pem" - ```` - - 1. When connecting to Kafka using SSL, you need to specify the following parameters: - - ````text - "property.security.protocol" = "ssl", - "property.ssl.ca.location" = "FILE:ca.pem", - "property.ssl.certificate.location" = "FILE:client.pem", - "property.ssl.key.location" = "FILE:client.key", - "property.ssl.key.password" = "abcdefg" - ```` - - in: - - `property.security.protocol` and `property.ssl.ca.location` are required to indicate the connection method is SSL and the location of the CA certificate. - - If client authentication is enabled on the Kafka server side, thenAlso set: - - ````text - "property.ssl.certificate.location" - "property.ssl.key.location" - "property.ssl.key.password" - ```` - - They are used to specify the client's public key, private key, and password for the private key, respectively. - - 2. 
Specify the default starting offset of the kafka partition - - If `kafka_partitions/kafka_offsets` is not specified, all partitions are consumed by default. - - At this point, you can specify `kafka_default_offsets` to specify the starting offset. Defaults to `OFFSET_END`, i.e. subscribes from the end. - - Example: - - ````text - "property.kafka_default_offsets" = "OFFSET_BEGINNING" - ```` -- comment - Comment for the routine load job. -### Example - -1. Create a Kafka routine import task named test1 for example_tbl of example_db. Specify the column separator and group.id and client.id, and automatically consume all partitions by default, and start subscribing from the location where there is data (OFFSET_BEGINNING) - - - - ```sql - CREATE ROUTINE LOAD example_db.test1 ON example_tbl - COLUMNS TERMINATED BY ",", - COLUMNS(k1, k2, k3, v1, v2, v3 = k1 * 100) - PROPERTIES - ( - "desired_concurrent_number"="3", - "max_batch_interval" = "20", - "max_batch_rows" = "300000", - "max_batch_size" = "209715200", - "strict_mode" = "false" - ) - FROM KAFKA - ( - "kafka_broker_list" = "broker1:9092,broker2:9092,broker3:9092", - "kafka_topic" = "my_topic", - "property.group.id" = "xxx", - "property.client.id" = "xxx", - "property.kafka_default_offsets" = "OFFSET_BEGINNING" - ); - ```` - -2. Create a Kafka routine dynamic multiple tables import task named "test1" for the "example_db". Specify the column delimiter, group.id, and client.id, and automatically consume all partitions, subscribing from the position with data (OFFSET_BEGINNING). - -Assuming that we need to import data from Kafka into tables "test1" and "test2" in the "example_db", we create a routine import task named "test1". At the same time, we write the data in "test1" and "test2" to a Kafka topic named "my_topic" so that data from Kafka can be imported into both tables through a routine import task. 
- - ```sql - CREATE ROUTINE LOAD example_db.test1 - PROPERTIES - ( - "desired_concurrent_number"="3", - "max_batch_interval" = "20", - "max_batch_rows" = "300000", - "max_batch_size" = "209715200", - "strict_mode" = "false" - ) - FROM KAFKA - ( - "kafka_broker_list" = "broker1:9092,broker2:9092,broker3:9092", - "kafka_topic" = "my_topic", - "property.group.id" = "xxx", - "property.client.id" = "xxx", - "property.kafka_default_offsets" = "OFFSET_BEGINNING" - ); - ``` - -3. Create a Kafka routine import task named test1 for example_tbl of example_db. Import tasks are in strict mode. - - - - ```sql - CREATE ROUTINE LOAD example_db.test1 ON example_tbl - COLUMNS(k1, k2, k3, v1, v2, v3 = k1 * 100), - PRECEDING FILTER k1 = 1, - WHERE k1 > 100 and k2 like "%doris%" - PROPERTIES - ( - "desired_concurrent_number"="3", - "max_batch_interval" = "20", - "max_batch_rows" = "300000", - "max_batch_size" = "209715200", - "strict_mode" = "true" - ) - FROM KAFKA - ( - "kafka_broker_list" = "broker1:9092,broker2:9092,broker3:9092", - "kafka_topic" = "my_topic", - "kafka_partitions" = "0,1,2,3", - "kafka_offsets" = "101,0,0,200" - ); - ```` - -4. Import data from the Kafka cluster through SSL authentication. Also set the client.id parameter. 
The import task is in non-strict mode and the time zone is Africa/Abidjan - - - - ```sql - CREATE ROUTINE LOAD example_db.test1 ON example_tbl - COLUMNS(k1, k2, k3, v1, v2, v3 = k1 * 100), - WHERE k1 > 100 and k2 like "%doris%" - PROPERTIES - ( - "desired_concurrent_number"="3", - "max_batch_interval" = "20", - "max_batch_rows" = "300000", - "max_batch_size" = "209715200", - "strict_mode" = "false", - "timezone" = "Africa/Abidjan" - ) - FROM KAFKA - ( - "kafka_broker_list" = "broker1:9092,broker2:9092,broker3:9092", - "kafka_topic" = "my_topic", - "property.security.protocol" = "ssl", - "property.ssl.ca.location" = "FILE:ca.pem", - "property.ssl.certificate.location" = "FILE:client.pem", - "property.ssl.key.location" = "FILE:client.key", - "property.ssl.key.password" = "abcdefg", - "property.client.id" = "my_client_id" - ); - ```` - -5. Import data in Json format. By default, the field name in Json is used as the column name mapping. Specify to import three partitions 0, 1, and 2, and the starting offsets are all 0 - - - - ```sql - CREATE ROUTINE LOAD example_db.test_json_label_1 ON table1 - COLUMNS(category,price,author) - PROPERTIES - ( - "desired_concurrent_number"="3", - "max_batch_interval" = "20", - "max_batch_rows" = "300000", - "max_batch_size" = "209715200", - "strict_mode" = "false", - "format" = "json" - ) - FROM KAFKA - ( - "kafka_broker_list" = "broker1:9092,broker2:9092,broker3:9092", - "kafka_topic" = "my_topic", - "kafka_partitions" = "0,1,2", - "kafka_offsets" = "0,0,0" - ); - ```` - -6. 
Import Json data, extract fields through Jsonpaths, and specify the root node of the Json document - - - - ```sql - CREATE ROUTINE LOAD example_db.test1 ON example_tbl - COLUMNS(category, author, price, timestamp, dt=from_unixtime(timestamp, '%Y%m%d')) - PROPERTIES - ( - "desired_concurrent_number"="3", - "max_batch_interval" = "20", - "max_batch_rows" = "300000", - "max_batch_size" = "209715200", - "strict_mode" = "false", - "format" = "json", - "jsonpaths" = "[\"$.category\",\"$.author\",\"$.price\",\"$.timestamp\"]", - "json_root" = "$.RECORDS" - "strip_outer_array" = "true" - ) - FROM KAFKA - ( - "kafka_broker_list" = "broker1:9092,broker2:9092,broker3:9092", - "kafka_topic" = "my_topic", - "kafka_partitions" = "0,1,2", - "kafka_offsets" = "0,0,0" - ); - ```` - -7. Create a Kafka routine import task named test1 for example_tbl of example_db. And use conditional filtering. - - - - ```sql - CREATE ROUTINE LOAD example_db.test1 ON example_tbl - WITH MERGE - COLUMNS(k1, k2, k3, v1, v2, v3), - WHERE k1 > 100 and k2 like "%doris%", - DELETE ON v3 >100 - PROPERTIES - ( - "desired_concurrent_number"="3", - "max_batch_interval" = "20", - "max_batch_rows" = "300000", - "max_batch_size" = "209715200", - "strict_mode" = "false" - ) - FROM KAFKA - ( - "kafka_broker_list" = "broker1:9092,broker2:9092,broker3:9092", - "kafka_topic" = "my_topic", - "kafka_partitions" = "0,1,2,3", - "kafka_offsets" = "101,0,0,200" - ); - ```` - -8. 
Import data to Unique with sequence column Key model table - - - - ```sql - CREATE ROUTINE LOAD example_db.test_job ON example_tbl - COLUMNS TERMINATED BY ",", - COLUMNS(k1,k2,source_sequence,v1,v2), - ORDER BY source_sequence - PROPERTIES - ( - "desired_concurrent_number"="3", - "max_batch_interval" = "30", - "max_batch_rows" = "300000", - "max_batch_size" = "209715200" - ) FROM KAFKA - ( - "kafka_broker_list" = "broker1:9092,broker2:9092,broker3:9092", - "kafka_topic" = "my_topic", - "kafka_partitions" = "0,1,2,3", - "kafka_offsets" = "101,0,0,200" - ); - ```` - -9. Consume from a specified point in time - - - - ```sql - CREATE ROUTINE LOAD example_db.test_job ON example_tbl - PROPERTIES - ( - "desired_concurrent_number"="3", - "max_batch_interval" = "30", - "max_batch_rows" = "300000", - "max_batch_size" = "209715200" - ) FROM KAFKA - ( - "kafka_broker_list" = "broker1:9092,broker2:9092", - "kafka_topic" = "my_topic", - "kafka_default_offsets" = "2021-05-21 10:00:00" - ); - ```` - -### Keywords - - CREATE, ROUTINE, LOAD, CREATE LOAD - -### Best Practice - -Partition and Offset for specified consumption - -Doris supports the specified Partition and Offset to start consumption, and also supports the function of consumption at a specified time point. The configuration relationship of the corresponding parameters is described here. - -There are three relevant parameters: - -- `kafka_partitions`: Specify a list of partitions to be consumed, such as "0, 1, 2, 3". -- `kafka_offsets`: Specify the starting offset of each partition, which must correspond to the number of `kafka_partitions` list. For example: "1000, 1000, 2000, 2000" -- `property.kafka_default_offsets`: Specifies the default starting offset of the partition. 
- -When creating an import job, these three parameters can have the following combinations: - -| Composition | `kafka_partitions` | `kafka_offsets` | `property.kafka_default_offsets` | Behavior | -| ----------- | ------------------ | --------------- | ------------------------------- | ------------------------------------------------------------ | -| 1 | No | No | No | The system will automatically find all partitions corresponding to the topic and start consumption from OFFSET_END | -| 2 | No | No | Yes | The system will automatically find all partitions corresponding to the topic and start consumption from the location specified by default offset | -| 3 | Yes | No | No | The system will start consumption from OFFSET_END of the specified partition | -| 4 | Yes | Yes | No | The system will start consumption from the specified offset of the specified partition | -| 5 | Yes | No | Yes | The system will start consumption from the specified partition, the location specified by default offset | diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CREATE-SYNC-JOB.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CREATE-SYNC-JOB.md deleted file mode 100644 index e15a6c149e3e37..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/CREATE-SYNC-JOB.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -{ - "title": "CREATE-SYNC-JOB", - "language": "en" -} ---- - - - -## CREATE-SYNC-JOB - -### Name - -CREATE SYNC JOB - -### Description - -The data synchronization (Sync Job) function supports users to submit a resident data synchronization job, and incrementally synchronizes the CDC (Change Data Capture) of the user's data update operation in the Mysql database by reading the Binlog log from the specified remote address. Features. - -Currently, the data synchronization job only supports connecting to Canal, obtaining the parsed Binlog data from the Canal Server and importing it into Doris. 
- -Users can view the data synchronization job status through [SHOW SYNC JOB](../../Show-Statements/SHOW-SYNC-JOB.md). - -grammar: - -```sql -CREATE SYNC [db.]job_name - ( - channel_desc, - channel_desc - ... - ) -binlog_desc -```` - -1. `job_name` - - The synchronization job name is the unique identifier of the job in the current database. Only one job with the same `job_name` can be running. - -2. `channel_desc` - - The data channel under the job is used to describe the mapping relationship between the mysql source table and the doris target table. - - grammar: - - ```sql - FROM mysql_db.src_tbl INTO des_tbl - [columns_mapping] - ```` - - 1. `mysql_db.src_tbl` - - Specify the database and source table on the mysql side. - - 2. `des_tbl` - - Specify the target table on the doris side. Only unique tables are supported, and the batch delete function of the table needs to be enabled (see the 'batch delete function' of help alter table for how to enable it). - - 4. `column_mapping` - - Specifies the mapping relationship between the columns of the mysql source table and the doris target table. If not specified, FE will default the columns of the source table and the target table to one-to-one correspondence in order. - - The form col_name = expr is not supported for columns. - - Example: - - ```` - Suppose the target table column is (k1, k2, v1), - - Change the order of columns k1 and k2 - (k2, k1, v1) - - Ignore the fourth column of the source data - (k2, k1, v1, dummy_column) - ```` - -3. `binlog_desc` - - Used to describe the remote data source, currently only one canal is supported. - - grammar: - - ```sql - FROM BINLOG - ( - "key1" = "value1", - "key2" = "value2" - ) - ```` - - 1. The properties corresponding to the Canal data source, prefixed with `canal.` - - 1. canal.server.ip: address of canal server - 2. canal.server.port: the port of the canal server - 3. canal.destination: the identity of the instance - 4. 
canal.batchSize: The maximum batch size obtained, the default is 8192 - 5. canal.username: username of instance - 6. canal.password: the password of the instance - 7. canal.debug: optional, when set to true, the batch and details of each row of data will be printed out - -### Example - -1. Simply create a data synchronization job named `job1` for `test_tbl` of `test_db`, connect to the local Canal server, corresponding to the Mysql source table `mysql_db1.tbl1`. - - ````SQL - CREATE SYNC `test_db`.`job1` - ( - FROM `mysql_db1`.`tbl1` INTO `test_tbl` - ) - FROM BINLOG - ( - "type" = "canal", - "canal.server.ip" = "127.0.0.1", - "canal.server.port" = "11111", - "canal.destination" = "example", - "canal.username" = "", - "canal.password" = "" - ); - ```` - -2. Create a data synchronization job named `job1` for multiple tables of `test_db`, corresponding to multiple Mysql source tables one-to-one, and explicitly specify the column mapping. - - ````SQL - CREATE SYNC `test_db`.`job1` - ( - FROM `mysql_db`.`t1` INTO `test1` (k1, k2, v1) , - FROM `mysql_db`.`t2` INTO `test2` (k3, k4, v2) - ) - FROM BINLOG - ( - "type" = "canal", - "canal.server.ip" = "xx.xxx.xxx.xx", - "canal.server.port" = "12111", - "canal.destination" = "example", - "canal.username" = "username", - "canal.password" = "password" - ); - ```` - -### Keywords - - CREATE, SYNC, JOB - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/MULTI-LOAD.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/MULTI-LOAD.md deleted file mode 100644 index baf5c64964b23f..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/MULTI-LOAD.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -{ - "title": "MULTI-LOAD", - "language": "en" -} ---- - - - -## MULTI-LOAD - -### Name - -MULTI LOAD - -### Description - -Users submit multiple import jobs through the HTTP protocol. 
Multi Load can ensure the atomic effect of multiple import jobs - -```` -Syntax: - curl --location-trusted -u user:passwd -XPOST http://host:port/api/{db}/_multi_start?label=xxx - curl --location-trusted -u user:passwd -T data.file http://host:port/api/{db}/{table1}/_load?label=xxx\&sub_label=yyy - curl --location-trusted -u user:passwd -T data.file http://host:port/api/{db}/{table2}/_load?label=xxx\&sub_label=zzz - curl --location-trusted -u user:passwd -XPOST http://host:port/api/{db}/_multi_commit?label=xxx - curl --location-trusted -u user:passwd -XPOST http://host:port/api/{db}/_multi_desc?label=xxx - -On the basis of 'MINI LOAD', 'MULTI LOAD' can support users to import to multiple tables at the same time. The specific commands are shown above. -'/api/{db}/_multi_start' starts a multi-table import task -'/api/{db}/{table}/_load' adds a table to be imported to an import task. The main difference from 'MINI LOAD' is that the 'sub_label' parameter needs to be passed in -'/api/{db}/_multi_commit' submits the entire multi-table import task, and starts processing in the background -'/api/{db}/_multi_abort' Abort a multi-table import task -'/api/{db}/_multi_desc' can display the number of jobs submitted by a multi-table import task - -Description of the HTTP protocol - Authorization Authentication Currently, Doris uses HTTP Basic authorization authentication. So you need to specify the username and password when importing - This method is to pass the password in clear text, since we are currently in an intranet environment. . . - - Expect Doris needs to send the http request, it needs to have 'Expect' header information, the content is '100-continue' - why? Because we need to redirect the request, before transmitting the data content, - This can avoid causing multiple transmissions of data, thereby improving efficiency. - - Content-Length Doris needs to send the request with the 'Content-Length' header. 
If the content sent is greater than - If the 'Content-Length' is less, then Palo thinks that there is a problem with the transmission, and fails to submit the task. - NOTE: If more data is sent than 'Content-Length', then Doris only reads 'Content-Length' - length content and import - -Parameter Description: - user: If the user is in the default_cluster, the user is the user_name. Otherwise user_name@cluster_name. - - label: Used to specify the label number imported in this batch, which is used for later job status query, etc. - This parameter is required. - - sub_label: Used to specify the subversion number inside a multi-table import task. For loads imported from multiple tables, this parameter must be passed in. - - columns: used to describe the corresponding column names in the import file. - If it is not passed in, then the order of the columns in the file is considered to be the same as the order in which the table was created. - The specified method is comma-separated, for example: columns=k1,k2,k3,k4 - - column_separator: used to specify the separator between columns, the default is '\t' - NOTE: url encoding is required, for example, '\t' needs to be specified as the delimiter, - Then you should pass in 'column_separator=%09' - - max_filter_ratio: used to specify the maximum ratio of non-standard data allowed to filter, the default is 0, no filtering is allowed - The custom specification should be as follows: 'max_filter_ratio=0.2', which means 20% error rate is allowed - Passing in has effect when '_multi_start' - -NOTE: - 1. This import method currently completes the import work on one machine, so it is not suitable for import work with a large amount of data. - It is recommended that the amount of imported data should not exceed 1GB - - 2. Currently it is not possible to submit multiple files using `curl -T "{file1, file2}"`, because curl splits them into multiple files - The request is sent. 
Multiple requests cannot share a label number, so it cannot be used. - - 3. Supports the use of curl to import data into Doris in a way similar to streaming, but only after the streaming ends Doris - The real import behavior will occur, and the amount of data in this way cannot be too large. -```` - -### Example - -```` -1. Import the data in the local file 'testData1' into the table 'testTbl1' in the database 'testDb', and -Import the data of 'testData2' into table 'testTbl2' in 'testDb' (user is in defalut_cluster) - curl --location-trusted -u root -XPOST http://host:port/api/testDb/_multi_start?label=123 - curl --location-trusted -u root -T testData1 http://host:port/api/testDb/testTbl1/_load?label=123\&sub_label=1 - curl --location-trusted -u root -T testData2 http://host:port/api/testDb/testTbl2/_load?label=123\&sub_label=2 - curl --location-trusted -u root -XPOST http://host:port/api/testDb/_multi_commit?label=123 - -2. Abandoned in the middle of multi-table import (user is in defalut_cluster) - curl --location-trusted -u root -XPOST http://host:port/api/testDb/_multi_start?label=123 - curl --location-trusted -u root -T testData1 http://host:port/api/testDb/testTbl1/_load?label=123\&sub_label=1 - curl --location-trusted -u root -XPOST http://host:port/api/testDb/_multi_abort?label=123 - -3. 
Multi-table import to see how much content has been submitted (the user is in the defalut_cluster) - curl --location-trusted -u root -XPOST http://host:port/api/testDb/_multi_start?label=123 - curl --location-trusted -u root -T testData1 http://host:port/api/testDb/testTbl1/_load?label=123\&sub_label=1 - curl --location-trusted -u root -XPOST http://host:port/api/testDb/_multi_desc?label=123 -```` - -### Keywords - -``` -MULTI, MINI, LOAD -``` - -### Best Practice diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/MYSQL-LOAD.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/MYSQL-LOAD.md deleted file mode 100644 index 673b21304f8c6b..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/MYSQL-LOAD.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -{ - "title": "MYSQL-LOAD", - "language": "en" -} ---- - - - -## MYSQL-LOAD - -### Name - - - MYSQL LOAD - - -### Description - -mysql-load: Import local data using the MySql client - -``` -LOAD DATA -[LOCAL] -INFILE 'file_name' -INTO TABLE tbl_name -[PARTITION (partition_name [, partition_name] ...)] -[COLUMNS TERMINATED BY 'string'] -[LINES TERMINATED BY 'string'] -[IGNORE number {LINES | ROWS}] -[(col_name_or_user_var [, col_name_or_user_var] ...)] -[SET (col_name={expr | DEFAULT} [, col_name={expr | DEFAULT}] ...)] -[PROPERTIES (key1 = value1 [, key2=value2]) ] -``` - -This statement is used to import data to the specified table. Unlike normal Load, this import method is a synchronous import. - -This import method can still guarantee the atomicity of a batch of import tasks, either all data imports are successful or all fail. - -1. MySQL Load starts with the syntax `LOAD DATA`, without specifying `LABEL` -2. Specify `LOCAL` to read client side files. Not specified to read FE server side local files. Server side load was disabled by default. 
It can be enabled by setting a secure path in FE configuration `mysql_load_server_secure_path` -3. The local fill path will be filled after `INFILE`, which can be a relative path or an absolute path. Currently only a single file is supported, and multiple files are not supported -4. The table name after `INTO TABLE` can specify the database name, as shown in the case. It can also be omitted, and the database where the current user is located will be used. -5. `PARTITION` syntax supports specified partition to import -6. `COLUMNS TERMINATED BY` specifies the column separator -7. `LINES TERMINATED BY` specifies the line separator -8. `IGNORE num LINES` The user skips the header of the CSV and can skip any number of lines. This syntax can also be replaced by'IGNORE num ROWS ' -9. Column mapping syntax, please refer to the column mapping chapter of [Imported Data Transformation](../../../../data-operate/import/import-way/mysql-load-manual.md) -10. `PROPERTIES` parameter configuration, see below for details - -### PROPERTIES - -1. max_filter_ratio:The maximum tolerable data ratio that can be filtered (for reasons such as data irregularity). Zero tolerance by default. Data irregularities do not include rows filtered out by where conditions. - -2. timeout: Specify the import timeout. in seconds. The default is 600 seconds. The setting range is from 1 second to 259200 seconds. - -3. strict_mode: The user specifies whether to enable strict mode for this import. The default is off. - -4. timezone: Specify the time zone used for this import. The default is Dongba District. This parameter affects the results of all time zone-related functions involved in the import. - -5. exec_mem_limit: Import memory limit. Default is 2GB. The unit is bytes. - -6. trim_double_quotes: Boolean type, The default value is false. True means that the outermost double quotes of each field in the load file are trimmed. - -7. 
enclose: When the csv data field contains row delimiters or column delimiters, to prevent accidental truncation, single-byte characters can be specified as brackets for protection. For example, the column separator is ",", the bracket is "'", and the data is "a,'b,c'", then "b,c" will be parsed as a field. - -8. escape: Used to escape characters that appear in a csv field identical to the enclosing characters. For example, if the data is "a,'b,'c'", enclose is "'", and you want "b,'c to be parsed as a field, you need to specify a single-byte escape character, such as "\", and then modify the data to "a,' b,\'c'". - -### Example - -1. Import the data from the client side local file `testData` into the table `testTbl` in the database `testDb`. Specify a timeout of 100 seconds - - ```sql - LOAD DATA LOCAL - INFILE 'testData' - INTO TABLE testDb.testTbl - PROPERTIES ("timeout"="100") - ``` - -2. Import the data from the server side local file `/root/testData` (set FE config `mysql_load_server_secure_path` to be `root` already) into the table `testTbl` in the database `testDb`. Specify a timeout of 100 seconds - - ```sql - LOAD DATA - INFILE '/root/testData' - INTO TABLE testDb.testTbl - PROPERTIES ("timeout"="100") - ``` - -3. Import data from client side local file `testData` into table `testTbl` in database `testDb`, allowing 20% error rate - - ```sql - LOAD DATA LOCAL - INFILE 'testData' - INTO TABLE testDb.testTbl - PROPERTIES ("max_filter_ratio"="0.2") - ``` - -4. Import the data from the client side local file `testData` into the table `testTbl` in the database `testDb`, allowing a 20% error rate and specifying the column names of the file - - ```sql - LOAD DATA LOCAL - INFILE 'testData' - INTO TABLE testDb.testTbl - (k2, k1, v1) - PROPERTIES ("max_filter_ratio"="0.2") - ``` - -5. Import the data in the local file `testData` into the p1, p2 partitions in the table of `testTbl` in the database `testDb`, allowing a 20% error rate. 
- - ```sql - LOAD DATA LOCAL - INFILE 'testData' - INTO TABLE testDb.testTbl - PARTITION (p1, p2) - PROPERTIES ("max_filter_ratio"="0.2") - ``` - -6. Import the data in the CSV file `testData` with a local row delimiter of `0102` and a column delimiter of `0304` into the table `testTbl` in the database `testDb`. - - ```sql - LOAD DATA LOCAL - INFILE 'testData' - INTO TABLE testDb.testTbl - COLUMNS TERMINATED BY '0304' - LINES TERMINATED BY '0102' - ``` - -7. Import the data from the local file `testData` into the p1, p2 partitions in the table of `testTbl` in the database `testDb` and skip the first 3 lines. - - ```sql - LOAD DATA LOCAL - INFILE 'testData' - INTO TABLE testDb.testTbl - PARTITION (p1, p2) - IGNORE 1 LINES - ``` - -8. Import data for strict schema filtering and set the time zone to Africa/Abidjan - - ```sql - LOAD DATA LOCAL - INFILE 'testData' - INTO TABLE testDb.testTbl - PROPERTIES ("strict_mode"="true", "timezone"="Africa/Abidjan") - ``` - -9. Import data is limited to 10GB of import memory and timed out in 10 minutes - - ```sql - LOAD DATA LOCAL - INFILE 'testData' - INTO TABLE testDb.testTbl - PROPERTIES ("exec_mem_limit"="10737418240", "timeout"="600") - ``` - -### Keywords - - MYSQL, LOAD diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/PAUSE-ROUTINE-LOAD.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/PAUSE-ROUTINE-LOAD.md deleted file mode 100644 index a3e57e333d19de..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/PAUSE-ROUTINE-LOAD.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "PAUSE-ROUTINE-LOAD", - "language": "en" -} ---- - - - -## PAUSE-ROUTINE-LOAD - -### Name - -PAUSE ROUTINE LOAD - -### Description - -Used to pause a Routine Load job. A suspended job can be rerun with the RESUME command. - -```sql -PAUSE [ALL] ROUTINE LOAD FOR job_name -```` - -### Example - -1. Pause the routine import job named test1. 
- - ```sql - PAUSE ROUTINE LOAD FOR test1; - ```` - -2. Pause all routine import jobs. - - ```sql - PAUSE ALL ROUTINE LOAD; - ```` - -### Keywords - - PAUSE, ROUTINE, LOAD - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/PAUSE-SYNC-JOB.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/PAUSE-SYNC-JOB.md deleted file mode 100644 index 0af27181f44043..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/PAUSE-SYNC-JOB.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "PAUSE-SYNC-JOB", - "language": "en" -} ---- - - - -## PAUSE-SYNC-JOB - -### Name - -PAUSE SYNC JOB - -### Description - -Pause a running resident data synchronization job in a database via `job_name`. The suspended job will stop synchronizing data and keep the latest position of consumption until it is resumed by the user. - -grammar: - -```sql -PAUSE SYNC JOB [db.]job_name -```` - -### Example - -1. Pause the data sync job named `job_name`. - - ```sql - PAUSE SYNC JOB `job_name`; - ```` - -### Keywords - - PAUSE, SYNC, JOB - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/RESUME-ROUTINE-LOAD.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/RESUME-ROUTINE-LOAD.md deleted file mode 100644 index 4b05d37ccfbd6a..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/RESUME-ROUTINE-LOAD.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -{ - "title": "RESUME-ROUTINE-LOAD", - "language": "en" -} ---- - - - -## RESUME-ROUTINE-LOAD - -### Name - -RESUME ROUTINE LOAD - -### Description - -Used to restart a suspended Routine Load job. The restarted job will continue to consume from the previously consumed offset. - -```sql -RESUME [ALL] ROUTINE LOAD FOR job_name -```` - -### Example - -1. Restart the routine import job named test1. 
- - ```sql - RESUME ROUTINE LOAD FOR test1; - ```` - -2. Restart all routine import jobs. - - ```sql - RESUME ALL ROUTINE LOAD; - ```` - -### Keywords - - RESUME, ROUTINE, LOAD - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/RESUME-SYNC-JOB.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/RESUME-SYNC-JOB.md deleted file mode 100644 index 4da91e225ef21e..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/RESUME-SYNC-JOB.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "RESUME-SYNC-JOB", - "language": "en" -} ---- - - - -## RESUME-SYNC-JOB - -### Name - -RESUME SYNC JOB - -### Description - -Resume a resident data synchronization job whose current database has been suspended by `job_name`, and the job will continue to synchronize data from the latest position before the last suspension. - -grammar: - -```sql -RESUME SYNC JOB [db.]job_name -```` - -### Example - -1. Resume the data synchronization job named `job_name` - - ```sql - RESUME SYNC JOB `job_name`; - ```` - -### Keywords - - RESUME, SYNC, LOAD - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STOP-ROUTINE-LOAD.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STOP-ROUTINE-LOAD.md deleted file mode 100644 index 375b994db9c1ad..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STOP-ROUTINE-LOAD.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -{ - "title": "STOP-ROUTINE-LOAD", - "language": "en" -} ---- - - - -## STOP-ROUTINE-LOAD - -### Name - -STOP ROUTINE LOAD - -### Description - -User stops a Routine Load job. A stopped job cannot be rerun. - -```sql -STOP ROUTINE LOAD FOR job_name; -```` - -### Example - -1. Stop the routine import job named test1. 
- - ```sql - STOP ROUTINE LOAD FOR test1; - ```` - -### Keywords - - STOP, ROUTINE, LOAD - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STOP-SYNC-JOB.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STOP-SYNC-JOB.md deleted file mode 100644 index cd2d125ad36d2c..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STOP-SYNC-JOB.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -{ - "title": "STOP-SYNC-JOB", - "language": "en" -} ---- - - - -## STOP-SYNC-JOB - -### Name - -STOP SYNC JOB - -### Description - -Stop a non-stop resident data synchronization job in a database by `job_name`. - -grammar: - -```sql -STOP SYNC JOB [db.]job_name -```` - -### Example - -1. Stop the data sync job named `job_name` - - ```sql - STOP SYNC JOB `job_name`; - ```` - -### Keywords - - STOP, SYNC, JOB - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md deleted file mode 100644 index 86601d58b76129..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Load/STREAM-LOAD.md +++ /dev/null @@ -1,472 +0,0 @@ ---- -{ - "title": "STREAM-LOAD", - "language": "en" -} ---- - - - -## STREAM-LOAD - -### Name - -STREAM LOAD - -### Description - -stream-load: load data to table in streaming - -```` -curl --location-trusted -u user:passwd [-H ""...] -T data.file -XPUT http://fe_host:http_port/api/{db}/{table}/_stream_load -```` - -This statement is used to import data into the specified table. The difference from ordinary Load is that this import method is synchronous import. - - This import method can still ensure the atomicity of a batch of import tasks, either all data is imported successfully or all of them fail. 
- - This operation will update the data of the rollup table related to this base table at the same time. - - This is a synchronous operation. After the entire data import work is completed, the import result is returned to the user. - - Currently, HTTP chunked and non-chunked uploads are supported. For non-chunked methods, Content-Length must be used to indicate the length of the uploaded content, which can ensure the integrity of the data. - -In addition, it is best for users to set the content of the Expect Header field to 100-continue, which can avoid unnecessary data transmission in some error scenarios. - -Parameter introduction: - Users can pass in import parameters through the Header part of HTTP - -1. label: The label imported once, the data of the same label cannot be imported multiple times. Users can avoid the problem of duplicate data import by specifying Label. - - Currently, Doris retains the most recent successful label within 30 minutes. - -2. column_separator: used to specify the column separator in the import file, the default is \t. If it is an invisible character, you need to add \x as a prefix and use hexadecimal to represent the separator. - - For example, the separator \x01 of the hive file needs to be specified as -H "column_separator:\x01". - - You can use a combination of multiple characters as column separators. - -3. line_delimiter: used to specify the newline character in the imported file, the default is \n. Combinations of multiple characters can be used as newlines. - -4. columns: used to specify the correspondence between the columns in the import file and the columns in the table. If the column in the source file corresponds exactly to the content in the table, then there is no need to specify the content of this field. - - If the source file does not correspond to the table schema, then this field is required for some data conversion. 
There are two forms of column, one is directly corresponding to the field in the imported file, which is directly represented by the field name; - - One is derived column, the syntax is `column_name` = expression. Give a few examples to help understand. - - Example 1: There are 3 columns "c1, c2, c3" in the table, and the three columns in the source file correspond to "c3, c2, c1" at a time; then you need to specify -H "columns: c3, c2, c1 " - - Example 2: There are 3 columns "c1, c2, c3" in the table, the first three columns in the source file correspond in turn, but there is more than 1 column; then you need to specify -H "columns: c1, c2, c3, xxx"; - - The last column can be arbitrarily assigned a name and placeholder - - Example 3: There are three columns "year, month, day" in the table, and there is only one time column in the source file, which is in "2018-06-01 01:02:03" format; - - Then you can specify -H "columns: col, year = year(col), month=month(col), day=day(col)" to complete the import - -5. where: used to extract part of the data. If the user needs to filter out the unnecessary data, he can achieve this by setting this option. - - Example 1: Only import data greater than k1 column equal to 20180601, then you can specify -H "where: k1 = 20180601" when importing - -6. max_filter_ratio: The maximum tolerable data ratio that can be filtered (for reasons such as data irregularity). Zero tolerance by default. Data irregularities do not include rows filtered out by where conditions. - -7. partitions: used to specify the partition designed for this import. If the user can determine the partition corresponding to the data, it is recommended to specify this item. Data that does not satisfy these partitions will be filtered out. - - For example, specify import to p1, p2 partition, -H "partitions: p1, p2" - -8. timeout: Specify the import timeout. in seconds. The default is 600 seconds. The setting range is from 1 second to 259200 seconds. - -9. 
strict_mode: The user specifies whether to enable strict mode for this import. The default is off. The enable mode is -H "strict_mode: true". - -10. timezone: Specifies the timezone used for this import. The default is "+08:00". This variable replaces the session variable `time_zone` in this import transaction. See the section "Importing with timezones" in [Best Practice](#best-practice) for more information. - -11. exec_mem_limit: Import memory limit. Default is 2GB. The unit is bytes. - -12. format: Specify load data format, support csv, json, csv_with_names(support csv file line header filter), csv_with_names_and_types(support csv file first two lines filter), parquet, orc, default is csv. - -13. jsonpaths: The way of importing json is divided into: simple mode and matching mode. - - Simple mode: The simple mode is not set the jsonpaths parameter. In this mode, the json data is required to be an object type, for example: - - ```` - {"k1":1, "k2":2, "k3":"hello"}, where k1, k2, k3 are column names. - ```` - - Matching mode: It is relatively complex for json data and needs to match the corresponding value through the jsonpaths parameter. - -14. strip_outer_array: Boolean type, true indicates that the json data starts with an array object and flattens the array object, the default value is false. E.g: - - ```` - [ - {"k1" : 1, "v1" : 2}, - {"k1" : 3, "v1" : 4} - ] - ```` - When strip_outer_array is true, the final import into doris will generate two rows of data. - - -15. json_root: json_root is a valid jsonpath string, used to specify the root node of the json document, the default value is "". - -16. merge_type: The merge type of data, which supports three types: APPEND, DELETE, and MERGE. Among them, APPEND is the default value, which means that this batch of data needs to be appended to the existing data, and DELETE means to delete all the data with the same key as this batch of data. 
Line, the MERGE semantics need to be used in conjunction with the delete condition, which means that the data that meets the delete condition is processed according to the DELETE semantics and the rest is processed according to the APPEND semantics, for example: `-H "merge_type: MERGE" -H "delete: flag=1"` - -17. delete: Only meaningful under MERGE, indicating the deletion condition of the data - -18. function_column.sequence_col: Only applicable to UNIQUE_KEYS. Under the same key column, ensure that the value column is REPLACEed according to the source_sequence column. The source_sequence can be a column in the data source or a column in the table structure. - -19. fuzzy_parse: Boolean type, true means that json will be parsed with the schema of the first row. Enabling this option can improve the efficiency of json import, but requires that the order of the keys of all json objects is the same as the first row, the default is false, only use in json format - -20. num_as_string: Boolean type, true means that when parsing json data, the numeric type will be converted to a string, and then imported without losing precision. - -21. read_json_by_line: Boolean type, true to support reading one json object per line, the default value is false. - -22. send_batch_parallelism: Integer, used to set the parallelism of sending batch data. If the value of parallelism exceeds `max_send_batch_parallelism_per_job` in the BE configuration, the BE as a coordination point will use the value of `max_send_batch_parallelism_per_job`. - -23. hidden_columns: Specify hidden column when no `columns` in Headers,multi hidden column shoud be -separated by commas. - - ``` - hidden_columns: __DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__ - The system will use the order specified by user. in case above, data should be ended - with __DORIS_SEQUENCE_COL__. - ``` -24. load_to_single_tablet: Boolean type, True means that one task can only load data to one tablet in the corresponding partition at a time. 
The default value is false. This parameter can only be set when loading data into the OLAP table with random bucketing. - -25. compress_type: Specify compress type file. Only support compressed csv file now. Support gz, lzo, bz2, lz4, lzop, deflate. - -26. trim_double_quotes: Boolean type, The default value is false. True means that the outermost double quotes of each field in the csv file are trimmed. - -27. skip_lines: Integer type, the default value is 0. It will skip some lines in the head of csv file. It will be disabled when format is `csv_with_names` or `csv_with_names_and_types`. - -28. comment: String type, the default value is "". - -29. enclose: When the csv data field contains row delimiters or column delimiters, to prevent accidental truncation, single-byte characters can be specified as brackets for protection. For example, the column separator is ",", the bracket is "'", and the data is "a,'b,c'", then "b,c" will be parsed as a field. - -30. escape Used to escape characters that appear in a csv field identical to the enclosing characters. For example, if the data is "a,'b,'c'", enclose is "'", and you want "b,'c to be parsed as a field, you need to specify a single-byte escape character, such as "\", and then modify the data to "a,' b,\'c'". - -### Example - -1. Import the data in the local file 'testData' into the table 'testTbl' in the database 'testDb', and use Label for deduplication. Specify a timeout of 100 seconds - - ```` - curl --location-trusted -u root -H "label:123" -H "timeout:100" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - -2. Import the data in the local file 'testData' into the table 'testTbl' in the database 'testDb', use Label for deduplication, and only import data whose k1 is equal to 20180601 - - ```` - curl --location-trusted -u root -H "label:123" -H "where: k1=20180601" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - -3. 
Import the data in the local file 'testData' into the table 'testTbl' in the database 'testDb', allowing a 20% error rate (the user is in the defalut_cluster) - - ```` - curl --location-trusted -u root -H "label:123" -H "max_filter_ratio:0.2" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - -4. Import the data in the local file 'testData' into the table 'testTbl' in the database 'testDb', allow a 20% error rate, and specify the column name of the file (the user is in the defalut_cluster) - - ```` - curl --location-trusted -u root -H "label:123" -H "max_filter_ratio:0.2" -H "columns: k2, k1, v1" -T testData http://host:port/api/testDb/testTbl /_stream_load - ```` - -5. Import the data in the local file 'testData' into the p1, p2 partitions of the table 'testTbl' in the database 'testDb', allowing a 20% error rate. - - ```` - curl --location-trusted -u root -H "label:123" -H "max_filter_ratio:0.2" -H "partitions: p1, p2" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - -6. Import using streaming (user is in defalut_cluster) - - ```` - seq 1 10 | awk '{OFS="\t"}{print $1, $1 * 10}' | curl --location-trusted -u root -T - http://host:port/api/testDb/testTbl/ _stream_load - ```` - -7. Import a table containing HLL columns, which can be columns in the table or columns in the data to generate HLL columns, or use hll_empty to supplement columns that are not in the data - - ```` - curl --location-trusted -u root -H "columns: k1, k2, v1=hll_hash(k1), v2=hll_empty()" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - -8. Import data for strict mode filtering and set the time zone to Africa/Abidjan - - ```` - curl --location-trusted -u root -H "strict_mode: true" -H "timezone: Africa/Abidjan" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - -9. 
Import a table with a BITMAP column, which can be a column in the table or a column in the data to generate a BITMAP column, or use bitmap_empty to fill an empty Bitmap - ```` - curl --location-trusted -u root -H "columns: k1, k2, v1=to_bitmap(k1), v2=bitmap_empty()" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - -10. Simple mode, import json data - Table Structure: - ``` - `category` varchar(512) NULL COMMENT "", - `author` varchar(512) NULL COMMENT "", - `title` varchar(512) NULL COMMENT "", - `price` double NULL COMMENT "" - ``` - json data format: - ```` - {"category":"C++","author":"avc","title":"C++ primer","price":895} - ```` - - Import command: - - ```` - curl --location-trusted -u root -H "label:123" -H "format: json" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - - In order to improve throughput, it supports importing multiple pieces of json data at one time, each line is a json object, and \n is used as a newline by default. You need to set read_json_by_line to true. The json data format is as follows: - - ```` - {"category":"C++","author":"avc","title":"C++ primer","price":89.5} - {"category":"Java","author":"avc","title":"Effective Java","price":95} - {"category":"Linux","author":"avc","title":"Linux kernel","price":195} - ```` - -11. 
Match pattern, import json data - json data format: - - ```` - [ - {"category":"xuxb111","author":"1avc","title":"SayingsoftheCentury","price":895},{"category":"xuxb222","author":"2avc"," title":"SayingsoftheCentury","price":895}, - {"category":"xuxb333","author":"3avc","title":"SayingsoftheCentury","price":895} - ] - ```` - - Precise import by specifying jsonpath, such as importing only three attributes of category, author, and price - - ```` - curl --location-trusted -u root -H "columns: category, price, author" -H "label:123" -H "format: json" -H "jsonpaths: [\"$.category\",\" $.price\",\"$.author\"]" -H "strip_outer_array: true" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - - illustrate: - 1) If the json data starts with an array, and each object in the array is a record, you need to set strip_outer_array to true, which means flatten the array. - 2) If the json data starts with an array, and each object in the array is a record, when setting jsonpath, our ROOT node is actually an object in the array. - -12. User specified json root node - json data format: - - ```` - { - "RECORDS":[ - {"category":"11","title":"SayingsoftheCentury","price":895,"timestamp":1589191587}, - {"category":"22","author":"2avc","price":895,"timestamp":1589191487}, - {"category":"33","author":"3avc","title":"SayingsoftheCentury","timestamp":1589191387} - ] - } - ```` - - Precise import by specifying jsonpath, such as importing only three attributes of category, author, and price - - ```` - curl --location-trusted -u root -H "columns: category, price, author" -H "label:123" -H "format: json" -H "jsonpaths: [\"$.category\",\" $.price\",\"$.author\"]" -H "strip_outer_array: true" -H "json_root: $.RECORDS" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - -13. 
Delete the data with the same import key as this batch - - ```` - curl --location-trusted -u root -H "merge_type: DELETE" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - -14. Delete the columns in this batch of data that match the data whose flag is listed as true, and append other rows normally - - ```` - curl --location-trusted -u root: -H "column_separator:," -H "columns: siteid, citycode, username, pv, flag" -H "merge_type: MERGE" -H "delete: flag=1" -T testData http://host:port/api/testDb/testTbl/_stream_load - ```` - -15. Import data into UNIQUE_KEYS table with sequence column - - ```` - curl --location-trusted -u root -H "columns: k1,k2,source_sequence,v1,v2" -H "function_column.sequence_col: source_sequence" -T testData http://host:port/api/testDb/testTbl/ _stream_load - ```` - -16. csv file line header filter import - - file data: - - ``` - id,name,age - 1,doris,20 - 2,flink,10 - ``` - Filter the first line import by specifying `format=csv_with_names` - ``` - curl --location-trusted -u root -T test.csv -H "label:1" -H "format:csv_with_names" -H "column_separator:," http://host:port/api/testDb/testTbl/_stream_load - ``` - -17. Import data into a table whose table field contains DEFAULT CURRENT_TIMESTAMP - - Table Structure: - - ```sql - `id` bigint(30) NOT NULL, - `order_code` varchar(30) DEFAULT NULL COMMENT '', - `create_time` datetimev2(3) DEFAULT CURRENT_TIMESTAMP - ``` - - JSON data format: - - ``` - {"id":1,"order_Code":"avc"} - ``` - - Import command: - - ``` - curl --location-trusted -u root -T test.json -H "label:1" -H "format:json" -H 'columns: id, order_code, create_time=CURRENT_TIMESTAMP()' http://host:port/api/testDb/testTbl/_stream_load - ``` - -### Keywords - - STREAM, LOAD - -### Best Practice - -1. Check the import task status - - Stream Load is a synchronous import process. The successful execution of the statement means that the data is imported successfully. 
The imported execution result will be returned synchronously through the HTTP return value. And display it in Json format. An example is as follows: - - ````json - { - "TxnId": 17, - "Label": "707717c0-271a-44c5-be0b-4e71bfeacaa5", - "Status": "Success", - "Message": "OK", - "NumberTotalRows": 5, - "NumberLoadedRows": 5, - "NumberFilteredRows": 0, - "NumberUnselectedRows": 0, - "LoadBytes": 28, - "LoadTimeMs": 27, - "BeginTxnTimeMs": 0, - "StreamLoadPutTimeMs": 2, - "ReadDataTimeMs": 0, - "WriteDataTimeMs": 3, - "CommitAndPublishTimeMs": 18 - } - ```` - - The following main explanations are given for the Stream load import result parameters: - - + TxnId: The imported transaction ID. Users do not perceive. - - + Label: Import Label. User specified or automatically generated by the system. - - + Status: Import completion status. - - "Success": Indicates successful import. - - "Publish Timeout": This state also indicates that the import has been completed, except that the data may be delayed and visible without retrying. - - "Label Already Exists": Label duplicate, need to be replaced Label. - - "Fail": Import failed. - - + ExistingJobStatus: The state of the load job corresponding to the existing Label. - - This field is displayed only when the status is "Label Already Exists". The user can know the status of the load job corresponding to Label through this state. "RUNNING" means that the job is still executing, and "FINISHED" means that the job is successful. - - + Message: Import error messages. - - + NumberTotalRows: Number of rows imported for total processing. - - + NumberLoadedRows: Number of rows successfully imported. - - + NumberFilteredRows: Number of rows that do not qualify for data quality. - - + NumberUnselectedRows: Number of rows filtered by where condition. - - + LoadBytes: Number of bytes imported. - - + LoadTimeMs: Import completion time. Unit milliseconds. - - + BeginTxnTimeMs: The time cost for RPC to Fe to begin a transaction, Unit milliseconds. 
- - + StreamLoadPutTimeMs: The time cost for RPC to Fe to get a stream load plan, Unit milliseconds. - - + ReadDataTimeMs: Read data time, Unit milliseconds. - - + WriteDataTimeMs: Write data time, Unit milliseconds. - - + CommitAndPublishTimeMs: The time cost for RPC to Fe to commit and publish a transaction, Unit milliseconds. - - + ErrorURL: If you have data quality problems, visit this URL to see specific error lines. - - > Note: Since Stream load is a synchronous import mode, import information will not be recorded in Doris system. Users cannot see Stream load asynchronously by looking at import commands. You need to listen for the return value of the create import request to get the import result. - -2. How to correctly submit the Stream Load job and process the returned results. - - Stream Load is a synchronous import operation, so the user needs to wait for the return result of the command synchronously, and decide the next processing method according to the return result. - - The user's primary concern is the `Status` field in the returned result. - - If it is `Success`, everything is fine and you can do other operations after that. - - If the returned result shows a large number of `Publish Timeout`, it may indicate that some resources (such as IO) of the cluster are currently under strain, and the imported data cannot take effect finally. The import task in the state of `Publish Timeout` has succeeded and does not need to be retried. However, it is recommended to slow down or stop the submission of new import tasks and observe the cluster load. - - If the returned result is `Fail`, the import failed, and you need to check the problem according to the specific reason. Once resolved, you can retry with the same Label. - - In some cases, the user's HTTP connection may be disconnected abnormally and the final returned result cannot be obtained. 
At this point, you can use the same Label to resubmit the import task, and the resubmitted task may have the following results: - - 1. `Status` status is `Success`, `Fail` or `Publish Timeout`. At this point, it can be processed according to the normal process. - 2. The `Status` status is `Label Already Exists`. At this time, you need to continue to view the `ExistingJobStatus` field. If the value of this field is `FINISHED`, it means that the import task corresponding to this Label has been successful, and there is no need to retry. If it is `RUNNING`, it means that the import task corresponding to this Label is still running. At this time, you need to use the same Label to continue to submit repeatedly at intervals (such as 10 seconds) until `Status` is not `Label Already Exists' `, or until the value of the `ExistingJobStatus` field is `FINISHED`. - -3. Cancel the import task - - Import tasks that have been submitted and not yet completed can be canceled with the CANCEL LOAD command. After cancellation, the written data will also be rolled back and will not take effect. - -4. Label, import transaction, multi-table atomicity - - All import tasks in Doris are atomic. And the import of multiple tables in the same import task can also guarantee atomicity. At the same time, Doris can also use the Label mechanism to ensure that the data imported is not lost or heavy. For details, see the [Import Transactions and Atomicity](../../../../data-operate/import/import-scenes/load-atomicity.md) documentation. - -5. Column mapping, derived columns and filtering - - Doris can support very rich column transformation and filtering operations in import statements. Most built-in functions and UDFs are supported. For how to use this function correctly, please refer to the [Column Mapping, Conversion and Filtering](../../../../data-operate/import/import-scenes/load-data-convert.md) document. - -6. Error data filtering - - Doris' import tasks can tolerate a portion of malformed data. 
The tolerance ratio is set via `max_filter_ratio`. The default is 0, which means that the entire import task will fail when there is an error data. If the user wants to ignore some problematic data rows, the secondary parameter can be set to a value between 0 and 1, and Doris will automatically skip the rows with incorrect data format. - - For some calculation methods of the tolerance rate, please refer to the [Column Mapping, Conversion and Filtering](../../../../data-operate/import/import-scenes/load-data-convert.md) document. - -7. Strict Mode - - The `strict_mode` attribute is used to set whether the import task runs in strict mode. The format affects the results of column mapping, transformation, and filtering, and it also controls the behavior of partial updates. For a detailed description of strict mode, see the [strict mode](../../../../data-operate/import/import-scenes/load-strict-mode.md) documentation. - -8. Timeout - - The default timeout for Stream Load is 10 minutes. from the time the task is submitted. If it does not complete within the timeout period, the task fails. - -9. Limits on data volume and number of tasks - - Stream Load is suitable for importing data within a few GB. Because the data is processed by single-threaded transmission, the performance of importing excessively large data cannot be guaranteed. When a large amount of local data needs to be imported, multiple import tasks can be submitted in parallel. - - Doris also limits the number of import tasks running at the same time in the cluster, usually ranging from 10-20. Import jobs submitted after that will be rejected. - -10. Importing with timezones - - Since Doris currently has no built-in time types for time zones, all `DATETIME` related types only represent absolute points in time, and do not contain time zone information, which does not change due to time zone changes in the Doris system. 
Therefore, for importing data with a time zone, we uniformly handle it as **converting it to data in a specific target time zone**. In the Doris system, this is the time zone represented by the session variable `time_zone`. - - In the import, on the other hand, our target timezone is specified by the parameter `timezone`, which will replace the session variable `time_zone` when timezone conversions occur, and when computing timezone-sensitive functions. Therefore, if there are no special circumstances, `timezone` should be set in the import transaction to match the `time_zone` of the current Doris cluster. This means that all time data with a time zone will be converted to that time zone. - For example, if the Doris system timezone is "+08:00", and the time column in the imported data contains two pieces of data, "2012-01-01 01:00:00Z" and "2015-12-12 12:12:12-08:00", then after we specify the timezone of the imported transaction via `-H "timezone: +08:00"` during import, both pieces of data will be converted to that timezone, resulting in the results "2012-01-01 09:00:00" and "2015-12-13 04:12:12". - - For a more detailed understanding, see [time-zone](../../../../advanced/time-zone) document. diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/ANALYZE.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/ANALYZE.md deleted file mode 100644 index 2a02d4b5926141..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/ANALYZE.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -{ - "title": "ANALYZE", - "language": "en" -} ---- - - - -## ANALYZE - -### Name - - - -ANALYZE - -### Description - -This statement is used to collect statistical information for various columns. - -```sql -ANALYZE < TABLE | DATABASE table_name | db_name > - [ (column_name [, ...]) ] - [ [ WITH SYNC ] [ WITH SAMPLE PERCENT | ROWS ] ]; -``` - -- `table_name`: The specified target table. 
It can be in the format `db_name.table_name`. -- `column_name`: The specified target column. It must be an existing column in `table_name`. You can specify multiple column names separated by commas. -- `sync`: Collect statistics synchronously. Returns after collection. If not specified, it executes asynchronously and returns a JOB ID. -- `sample percent | rows`: Collect statistics with sampling. You can specify a sampling percentage or a number of sampling rows. - -### Example - -Collect statistical data for a table with a 10% sampling rate: - -```sql -ANALYZE TABLE lineitem WITH SAMPLE PERCENT 10; -``` - -Collect statistical data for a table with a sample of 100,000 rows: - -```sql -ANALYZE TABLE lineitem WITH SAMPLE ROWS 100000; -``` - -### Keywords - -ANALYZE \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/CANCEL-EXPORT.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/CANCEL-EXPORT.md deleted file mode 100644 index 247ab04727ceee..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/CANCEL-EXPORT.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -{ - "title": "CANCEL-EXPORT", - "language": "en" -} ---- - - - -## CANCEL-EXPORT - -### Name - - - -CANCEL EXPORT - -### Description - -This statement is used to undo an export job for the specified label. Or batch undo export jobs via fuzzy matching - -```sql -CANCEL EXPORT -[FROM db_name] -WHERE [LABEL = "export_label" | LABEL like "label_pattern" | STATE = "PENDING/IN_QUEUE/EXPORTING"] -``` - -### Example - -1. Cancel the export job whose label is `example_db_test_export_label` on the database example_db - - ```sql - CANCEL EXPORT - FROM example_db - WHERE LABEL = "example_db_test_export_label" and STATE = "EXPORTING"; - ```` - -2. Cancel all export jobs containing example* on the database example*db. 
- - ```sql - CANCEL EXPORT - FROM example_db - WHERE LABEL like "%example%"; - ``` - -3. Cancel all export jobs whose state is "PENDING" - - ```sql - CANCEL EXPORT - FROM example_db - WHERE STATE = "PENDING"; - ``` - -### Keywords - - CANCEL, EXPORT - -### Best Practice - -1. Only pending export jobs in PENDING, IN_QUEUE, EXPORTING state can be canceled. -2. When performing batch undo, Doris does not guarantee the atomic undo of all corresponding export jobs. That is, it is possible that only some of the export jobs were successfully undone. The user can view the job status through the SHOW EXPORT statement and try to execute the CANCEL EXPORT statement repeatedly. -3. When the job of the `EXPORTING` state is revoked, part of the data may have been exported to the storage system, and the user needs to handle (delete) this part of the exported data. diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/DELETE.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/DELETE.md deleted file mode 100644 index 1a34bc2dede4e0..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/DELETE.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -{ - "title": "DELETE", - "language": "en" -} ---- - - - -## DELETE - -### Name - -DELETE - -### Description - -This statement is used to conditionally delete data in the specified table (base index) partition. - -This operation will also delete the data of the rollup index related to this base index. 
- -#### Syntax - -Syntax 1: This syntax can only specify filter predicates - -```SQL -DELETE FROM table_name [PARTITION partition_name | PARTITIONS (partition_name [, partition_name])] -WHERE -column_name op { value | value_list } [ AND column_name op { value | value_list } ...]; -``` - - - -Syntax 2:This syntax can only used on UNIQUE KEY model - -```sql -DELETE FROM table_name - [PARTITION partition_name | PARTITIONS (partition_name [, partition_name])] - [USING additional_tables] - WHERE condition -``` - - - -#### Required Parameters - -+ table_name: Specifies the table from which rows are removed. -+ column_name: column belong to table_name -+ op: Logical comparison operator, The optional types of op include: =, >, <, >=, <=, !=, in, not in -+ value | value_list: value or value list used for logial comparison - - - -+ WHERE condition: Specifies a condition to use to select rows for removal - - - - -#### Optional Parameters - -+ PARTITION partition_name | PARTITIONS (partition_name [, partition_name]): Specifies the partition or partitions to select rows for removal - - - -+ table_alias: alias of table -+ USING additional_tables: If you need to refer to additional tables in the WHERE clause to help identify the rows to be removed, then specify those table names in the USING clause. You can also use the USING clause to specify subqueries that identify the rows to be removed. - - - -#### Note - -1. Only conditions on the key column can be specified when using AGGREGATE (UNIQUE) model. -2. When the selected key column does not exist in a rollup, delete cannot be performed. -3. Wheny you use syntax 1, conditions can only have an "and" relationship. If you want to achieve an "or" relationship, you need to write the conditions in two DELETE statements. -4. In syntax 1, if it is a partitioned table, you can specify a partition. If not specified, Doris will infer partition from the given conditions. 
In two cases, Doris cannot infer the partition from conditions: 1) the conditions do not contain partition columns; 2) The operator of the partition column is not in. When a partition table does not specify the partition, or the partition cannot be inferred from the conditions, the session variable delete_without_partition needs to be true to make delete statement be applied to all partitions. -5. This statement may reduce query efficiency for a period of time after execution. The degree of impact depends on the number of delete conditions specified in the statement. The more conditions you specify, the greater the impact. - -### Example - -1. Delete the data row whose k1 column value is 3 in my_table partition p1 - - ```sql - DELETE FROM my_table PARTITION p1 - WHERE k1 = 3; - ```` - -2. Delete the data rows where the value of column k1 is greater than or equal to 3 and the value of column k2 is "abc" in my_table partition p1 - - ```sql - DELETE FROM my_table PARTITION p1 - WHERE k1 >= 3 AND k2 = "abc"; - ```` - -3. Delete the data rows where the value of column k1 is greater than or equal to 3 and the value of column k2 is "abc" in my_table partition p1, p2 - - ```sql - DELETE FROM my_table PARTITIONS (p1, p2) - WHERE k1 >= 3 AND k2 = "abc"; - ```` - - - -4. 
use the result of `t2` join `t3` to romve rows from `t1`,delete table only support unique key model - - ```sql - -- create t1, t2, t3 tables - CREATE TABLE t1 - (id INT, c1 BIGINT, c2 STRING, c3 DOUBLE, c4 DATE) - UNIQUE KEY (id) - DISTRIBUTED BY HASH (id) - PROPERTIES('replication_num'='1', "function_column.sequence_col" = "c4"); - - CREATE TABLE t2 - (id INT, c1 BIGINT, c2 STRING, c3 DOUBLE, c4 DATE) - DISTRIBUTED BY HASH (id) - PROPERTIES('replication_num'='1'); - - CREATE TABLE t3 - (id INT) - DISTRIBUTED BY HASH (id) - PROPERTIES('replication_num'='1'); - - -- insert data - INSERT INTO t1 VALUES - (1, 1, '1', 1.0, '2000-01-01'), - (2, 2, '2', 2.0, '2000-01-02'), - (3, 3, '3', 3.0, '2000-01-03'); - - INSERT INTO t2 VALUES - (1, 10, '10', 10.0, '2000-01-10'), - (2, 20, '20', 20.0, '2000-01-20'), - (3, 30, '30', 30.0, '2000-01-30'), - (4, 4, '4', 4.0, '2000-01-04'), - (5, 5, '5', 5.0, '2000-01-05'); - - INSERT INTO t3 VALUES - (1), - (4), - (5); - - -- remove rows from t1 - DELETE FROM t1 - USING t2 INNER JOIN t3 ON t2.id = t3.id - WHERE t1.id = t2.id; - ``` - - the expect result is only remove the row where id = 1 in table t1 - - ``` - +----+----+----+--------+------------+ - | id | c1 | c2 | c3 | c4 | - +----+----+----+--------+------------+ - | 2 | 2 | 2 | 2.0 | 2000-01-02 | - | 3 | 3 | 3 | 3.0 | 2000-01-03 | - +----+----+----+--------+------------+ - ``` - - - -### Keywords - - DELETE - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md deleted file mode 100644 index c6479d0e8245a2..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/EXPORT.md +++ /dev/null @@ -1,381 +0,0 @@ ---- -{ - "title": "EXPORT", - "language": "en" -} ---- - - - -## EXPORT - -### Name - -EXPORT - -### Description - -The `EXPORT` command is used to export the data of a 
specified table to a designated location as a file. Currently, it supports exporting to remote storage such as HDFS, S3, BOS, and COS (Tencent Cloud) through Broker process, S3 protocol, or HDFS protocol. - -`EXPORT` is an asynchronous operation, and the command submits an `EXPORT JOB` to Doris. The task will be successfully submitted and returns immediately. After execution, you can use the [SHOW EXPORT](../../Show-Statements/SHOW-EXPORT.md) to view the progress. - -**grammar** - - ```sql - EXPORT TABLE table_name - [PARTITION (p1[,p2])] - [WHERE] - TO export_path - [opt_properties] - WITH BROKER/S3/HDFS - [broker_properties]; - ``` - -**principle** - -The bottom layer of the `Export` statement actually executes the `select...outfile..` statement. The `Export` task will be decomposed into multiple `select...outfile..` statements to execute concurrently according to the value of the `parallelism` parameter. Each `select...outfile..` is responsible for exporting some tablets of table. - -**illustrate:** - -- `table_name` - - The table name of the table currently being exported. Only the export of Doris local table / View / External Table data is supported. - -- `partition` - - It is possible to export only some specified partitions of the specified table - -- `export_path` - - The exported file path can be a directory or a file directory with a file prefix, for example: `hdfs://path/to/my_file_` - -- `opt_properties` - - Used to specify some export parameters. - - ```sql - [PROPERTIES ("key"="value", ...)] - ```` - - The following parameters can be specified: - - - `label`: This parameter is optional, specifies the label of the export task. If this parameter is not specified, the system randomly assigns a label to the export task. - - - `column_separator`: Specifies the exported column separator, default is `\t`, mulit-bytes is supported. This parameter is only used for `CSV` file format. 
- - - `line_delimiter`: Specifies the line delimiter for export, the default is `\n`, mulit-bytes is supported. This parameter is only used for `CSV` file format. - - - `timeout`: The timeout period of the export job, the default is 2 hours, the unit is seconds. - - - `columns`: Specifies certain columns of the export job table - - - `format`: Specifies the file format, support: parquet, orc, csv, csv_with_names, csv_with_names_and_types.The default is csv format. - - - `parallelism`: The concurrency degree of the `export` job, the default is `1`. The export job will be divided into `select..outfile..` statements of the number of `parallelism` to execute concurrently. (If the value of `parallelism` is greater than the number of tablets in the table, the system will automatically set `parallelism` to the number of tablets, that is, each `select..outfile..` statement is responsible for one tablet) - - - `delete_existing_files`: default `false`. If it is specified as true, you will first delete all files specified in the directory specified by the file_path, and then export the data to the directory.For example: "file_path" = "/user/tmp", then delete all files and directory under "/user/"; "file_path" = "/user/tmp/", then delete all files and directory under "/user/tmp/" - - - `max_file_size`: it is the limit for the size of a single file in the export job. If the result file exceeds this value, it will be split into multiple files. The valid range for `max_file_size` is [5MB, 2GB], with a default value of 1GB. (When exporting to the ORC file format, the actual size of the split files will be multiples of 64MB, for example, if max_file_size is specified as 5MB, the actual split size will be 64MB; if max_file_size is specified as 65MB, the actual split size will be 128MB.) - - - `with_bom`: The default is false. If it is set to true, the exported file is encoded in UTF8 with BOM (valid only for CSV-related file format). 
- - - `timeout`: This is the timeout parameter of the export job, the default timeout is 2 hours, and the unit is seconds. - - > Note that to use the `delete_existing_files` parameter, you also need to add the configuration `enable_delete_existing_files = true` to the fe.conf file and restart the FE. Only then will the `delete_existing_files` parameter take effect. Setting `delete_existing_files = true` is a dangerous operation and it is recommended to only use it in a testing environment. - -- `WITH BROKER` - - The export function needs to write data to the remote storage through the Broker process. Here you need to define the relevant connection information for the broker to use. - - ```sql - WITH BROKER "broker_name" - ("key"="value"[,...]) - - Broker related properties: - username: user name - password: password - hadoop.security.authentication: specify the authentication method as kerberos - kerberos_principal: specifies the principal of kerberos - kerberos_keytab: specifies the path to the keytab file of kerberos. The file must be the absolute path to the file on the server where the broker process is located. and can be accessed by the Broker process - ```` - -- `WITH HDFS` - - You can directly write data to the remote HDFS. - - - ```sql - WITH HDFS ("key"="value"[,...]) - - HDFS related properties: - fs.defaultFS: namenode address and port - hadoop.username: hdfs username - dfs.nameservices: if hadoop enable HA, please set fs nameservice. See hdfs-site.xml - dfs.ha.namenodes.[nameservice ID]:unique identifiers for each NameNode in the nameservice. See hdfs-site.xml - dfs.namenode.rpc-address.[nameservice ID].[name node ID]: the fully-qualified RPC address for each NameNode to listen on. 
See hdfs-site.xml - dfs.client.failover.proxy.provider.[nameservice ID]:the Java class that HDFS clients use to contact the Active NameNode, usually it is org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider - - For a kerberos-authentication enabled Hadoop cluster, additional properties need to be set: - dfs.namenode.kerberos.principal: HDFS namenode service principal - hadoop.security.authentication: kerberos - hadoop.kerberos.principal: the Kerberos pincipal that Doris will use when connectiong to HDFS. - hadoop.kerberos.keytab: HDFS client keytab location. - ``` - -- `WITH S3` - - You can directly write data to a remote S3 object store - - ```sql - WITH S3 ("key"="value"[,...]) - - S3 related properties: - AWS_ENDPOINT - AWS_ACCESS_KEY - AWS_SECRET_KEY - AWS_REGION - use_path_style: (optional) default false . The S3 SDK uses the virtual-hosted style by default. However, some object storage systems may not be enabled or support virtual-hosted style access. At this time, we can add the use_path_style parameter to force the use of path style access method. - ``` - -### Example - -#### export to local - -> Export data to the local file system needs to add `enable_outfile_to_local = true` to the fe.conf and restart the Fe. - -1. You can export the `test` table to a local store. Export csv format file by default. - -```sql -EXPORT TABLE test TO "file:///home/user/tmp/"; -``` - -2. You can export the k1 and k2 columns in `test` table to a local store, and set export label. Export csv format file by default. - -```sql -EXPORT TABLE test TO "file:///home/user/tmp/" -PROPERTIES ( - "label" = "label1", - "columns" = "k1,k2" -); -``` - -3. You can export the rows where `k1 < 50` in `test` table to a local store, and set column_separator to `,`. Export csv format file by default. - -```sql -EXPORT TABLE test WHERE k1 < 50 TO "file:///home/user/tmp/" -PROPERTIES ( - "columns" = "k1,k2", - "column_separator"="," -); -``` - -4. 
Export partitions p1 and p2 from the test table to local storage, with the default exported file format being csv. - -```sql -EXPORT TABLE test PARTITION (p1,p2) TO "file:///home/user/tmp/" -PROPERTIES ("columns" = "k1,k2"); - ``` - -5. Export all data in the test table to local storage with a non-default file format. - -```sql -// parquet file format -EXPORT TABLE test TO "file:///home/user/tmp/" -PROPERTIES ( - "columns" = "k1,k2", - "format" = "parquet" -); - -// orc file format -EXPORT TABLE test TO "file:///home/user/tmp/" -PROPERTIES ( - "columns" = "k1,k2", - "format" = "orc" -); - -// csv_with_names file format. Using 'AA' as the column delimiter and 'zz' as the line delimiter. -EXPORT TABLE test TO "file:///home/user/tmp/" -PROPERTIES ( - "format" = "csv_with_names", - "column_separator"="AA", - "line_delimiter" = "zz" -); - -// csv_with_names_and_types file format -EXPORT TABLE test TO "file:///home/user/tmp/" -PROPERTIES ( - "format" = "csv_with_names_and_types" -); - -``` - -6. set max_file_sizes - -```sql -EXPORT TABLE test TO "file:///home/user/tmp/" -PROPERTIES ( - "format" = "parquet", - "max_file_size" = "5MB" -); -``` - -When the exported file size is larger than 5MB, the data will be split into multiple files, with each file containing a maximum of 5MB. - -7. set parallelism -```sql -EXPORT TABLE test TO "file:///home/user/tmp/" -PROPERTIES ( - "format" = "parquet", - "max_file_size" = "5MB", - "parallelism" = "5" -); -``` - -8. set delete_existing_files - -```sql -EXPORT TABLE test TO "file:///home/user/tmp" -PROPERTIES ( - "format" = "parquet", - "max_file_size" = "5MB", - "delete_existing_files" = "true" -) -``` - -Before exporting data, all files and directories in the `/home/user/` directory will be deleted, and then the data will be exported to that directory. - -#### export with S3 - -1. 
Export all data from the `testTbl` table to S3 using invisible character '\x07' as a delimiter for columns and rows.If you want to export data to minio, you also need to specify use_path_style=true. - - ```sql - EXPORT TABLE testTbl TO "s3://bucket/a/b/c" - PROPERTIES ( - "column_separator"="\\x07", - "line_delimiter" = "\\x07" - ) WITH s3 ( - "s3.endpoint" = "xxxxx", - "s3.region" = "xxxxx", - "s3.secret_key"="xxxx", - "s3.access_key" = "xxxxx" - ) - ``` - -2. Export all data in the test table to HDFS in the format of parquet, limit the size of a single file to 1024MB, and reserve all files in the specified directory. - -#### export with HDFS -1. Export all data from the `test` table to HDFS in `Parquet` format, with a limit of 512MB for the size of a single file in the export job, and retain all files under the specified directory. - - ```sql - EXPORT TABLE test TO "hdfs://hdfs_host:port/a/b/c/" - PROPERTIES( - "format" = "parquet", - "max_file_size" = "512MB", - "delete_existing_files" = "false" - ) - with HDFS ( - "fs.defaultFS"="hdfs://hdfs_host:port", - "hadoop.username" = "hadoop" - ); - ``` - -#### export with Broker -You need to first start the broker process and add it to the FE before proceeding. -1. Export the `test` table to hdfs - - ```sql - EXPORT TABLE test TO "hdfs://hdfs_host:port/a/b/c" - WITH BROKER "broker_name" - ( - "username"="xxx", - "password"="yyy" - ); - ``` - -2. Export partitions 'p1' and 'p2' from the 'testTbl' table to HDFS using ',' as the column delimiter and specifying a label. - - ```sql - EXPORT TABLE testTbl PARTITION (p1,p2) TO "hdfs://hdfs_host:port/a/b/c" - PROPERTIES ( - "label" = "mylabel", - "column_separator"="," - ) - WITH BROKER "broker_name" - ( - "username"="xxx", - "password"="yyy" - ); - ``` - -3. Export all data from the 'testTbl' table to HDFS using the non-visible character '\x07' as the column and row delimiter. 
- -```sql -EXPORT TABLE testTbl TO "hdfs://hdfs_host:port/a/b/c" -PROPERTIES ( - "column_separator"="\\x07", - "line_delimiter" = "\\x07" -) -WITH BROKER "broker_name" -( - "username"="xxx", - "password"="yyy" -) -``` - -### Keywords - - EXPORT - -### Best Practice - - #### Concurrent Export - - An Export job can be configured with the `parallelism` parameter to concurrently export data. The `parallelism` parameter specifies the number of threads to execute the `EXPORT Job`. Each thread is responsible for exporting a subset of the total tablets. - - The underlying execution logic of an `Export Job `is actually the `SELECT INTO OUTFILE` statement. Each thread specified by the `parallelism` parameter executes independent `SELECT INTO OUTFILE` statements. - - The specific logic for splitting an `Export Job` into multiple `SELECT INTO OUTFILE` is, to evenly distribute all the tablets of the table among all parallel threads. For example: - - - If num(tablets) = 40 and parallelism = 3, then the three threads will be responsible for 14, 13, and 13 tablets, respectively. - - If num(tablets) = 2 and parallelism = 3, then Doris automatically sets the parallelism to 2, and each thread is responsible for one tablet. - - When the number of tablets responsible for a thread exceeds the `maximum_tablets_of_outfile_in_export` value (default is 10, and can be modified by adding the `maximum_tablets_of_outfile_in_export` parameter in fe.conf), the thread will split the tablets which are responsibled for this thread into multiple `SELECT INTO OUTFILE` statements. For example: - - - If a thread is responsible for 14 tablets and `maximum_tablets_of_outfile_in_export = 10`, then the thread will be responsible for two `SELECT INTO OUTFILE` statements. The first `SELECT INTO OUTFILE` statement exports 10 tablets, and the second `SELECT INTO OUTFILE` statement exports 4 tablets. The two `SELECT INTO OUTFILE` statements are executed serially by this thread. 
- - #### memory limit - - The query plan for an `Export Job` typically involves only `scanning and exporting`, and does not involve compute logic that requires a lot of memory. Therefore, the default memory limit of 2GB is usually sufficient to meet the requirements. - - However, in certain scenarios, such as a query plan that requires scanning too many tablets on the same BE, or when there are too many data versions of tablets, it may result in insufficient memory. In these cases, you can adjust the session variable `exec_mem_limit` to increase the memory usage limit. - - #### Precautions - - - Exporting a large amount of data at one time is not recommended. The maximum recommended export data volume for an Export job is several tens of GB. An overly large export results in more junk files and higher retry costs. If the amount of table data is too large, it is recommended to export by partition. - - - If the Export job fails, the generated files will not be deleted, and the user needs to delete it manually. - - - The Export job only exports the data of the Base table / View / External table, not the data of the materialized view. - - - The export job scans data and occupies IO resources, which may affect the query latency of the system. - - - Currently, The `Export Job` is simply check whether the `Tablets version` is the same, it is recommended not to import data during the execution of the `Export Job`. - - - The maximum number of partitions that an `Export job` allows is 2000. You can add a parameter to the fe.conf `maximum_number_of_export_partitions` and restart FE to modify the setting. 
diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT-OVERWRITE.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT-OVERWRITE.md deleted file mode 100644 index 7293785ba536ef..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT-OVERWRITE.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -{ - "title": "INSERT-OVERWRITE", - "language": "en" -} - ---- - - - -## INSERT OVERWRITE - -### Name - -INSERT OVERWRITE - -### Description - -The function of this statement is to overwrite a table or some partitions of a table - -```sql -INSERT OVERWRITE table table_name - [ PARTITION (p1, ... | *) ] - [ WITH LABEL label] - [ (column [, ...]) ] - [ [ hint [, ...] ] ] - { VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | query } -``` - - Parameters - -> table_name: the destination table to overwrite. This table must exist. It can be of the form `db_name.table_name` -> -> partitions: the table partitions that needs to be overwritten. The following two formats are supported -> ->> 1. partition names. must be one of the existing partitions in `table_name` separated by a comma ->> 2. asterisk(*)。Enable [auto-detect-partition](#overwrite-auto-detect-partition). The write operation will automatically detect the partitions involved in the data and overwrite those partitions. -> -> label: specify a label for the Insert task -> -> column_name: the specified destination column must be one of the existing columns in `table_name` -> -> expression: the corresponding expression that needs to be assigned to a column -> -> DEFAULT: let the column use the default value -> -> query: a common query, the result of the query will overwrite the target. -> -> hint: some indicator used to indicate the execution behavior of `INSERT`. You can choose one of this values: `/*+ STREAMING */`, `/*+ SHUFFLE */` or `/*+ NOSHUFFLE */. -> -> 1. 
STREAMING: At present, it has no practical effect and is only reserved for compatibility with previous versions. (In the previous version, adding this hint would return a label, but now it defaults to returning a label) -> 2. SHUFFLE: When the target table is a partition table, enabling this hint will do repartiiton. -> 3. NOSHUFFLE: Even if the target table is a partition table, repartiiton will not be performed, but some other operations will be performed to ensure that the data is correctly dropped into each partition. - -Notice: - -1. In the current version, the session variable `enable_insert_strict` is set to `true` by default. If some data that does not conform to the format of the target table is filtered out during the execution of the `INSERT OVERWRITE` statement, such as when overwriting a partition and not all partition conditions are satisfied, overwriting the target table will fail. -2. If the target table of the INSERT OVERWRITE is an [AUTO-PARTITION-table](../../../../advanced/partition/auto-partition), then new partitions can be created if PARTITION is not specified (that is, rewrite the whole table). If PARTITION for overwrite is specified(Includes automatic detection and overwriting of partitions through the `partition(*)` syntax), then the AUTO PARTITION table behaves as if it were a normal partitioned table during this process, and data that does not satisfy the existing partition conditions is filtered instead of creating a new partition. -3. The `INSERT OVERWRITE` statement first creates a new table, inserts the data to be overwritten into the new table, and then atomically replaces the old table with the new table and modifies its name. Therefore, during the process of overwriting the table, the data in the old table can still be accessed normally until the overwriting is completed. - -### Example - -Assuming there is a table named `test`. 
The table contains two columns `c1` and `c2`, and two partitions `p1` and `p2` - -```sql -CREATE TABLE IF NOT EXISTS test ( - `c1` int NOT NULL DEFAULT "1", - `c2` int NOT NULL DEFAULT "4" -) ENGINE=OLAP -UNIQUE KEY(`c1`) -PARTITION BY LIST (`c1`) -( -PARTITION p1 VALUES IN ("1","2","3"), # Partition p1 only allows 1, 2, and 3 to exist. -PARTITION p2 VALUES IN ("4","5","6") # Partition p2 only allows 4, 5, and 6 to exist. -) -DISTRIBUTED BY HASH(`c1`) BUCKETS 3 -PROPERTIES ( - "replication_allocation" = "tag.location.default: 1", - "in_memory" = "false", - "storage_format" = "V2" -); -``` - -#### Overwrite Table - -1. Overwrite the `test` table using the form of `VALUES`. - - ```sql - // Single-row overwrite. - INSERT OVERWRITE table test VALUES (1, 2); - INSERT OVERWRITE table test (c1, c2) VALUES (1, 2); - INSERT OVERWRITE table test (c1, c2) VALUES (1, DEFAULT); - INSERT OVERWRITE table test (c1) VALUES (1); - // Multi-row overwrite. - INSERT OVERWRITE table test VALUES (1, 2), (3, 2 + 2); - INSERT OVERWRITE table test (c1, c2) VALUES (1, 2), (3, 2 * 2); - INSERT OVERWRITE table test (c1, c2) VALUES (1, DEFAULT), (3, DEFAULT); - INSERT OVERWRITE table test (c1) VALUES (1), (3); - ``` - -- The first and second statements have the same effect. If the target column is not specified during overwriting, the column order in the table will be used as the default target column. After the overwrite is successful, there is only one row of data in the `test` table. -- The third and fourth statements have the same effect. The unspecified column `c2` will be overwritten with the default value 4. After the overwrite is successful, there is only one row of data in the `test` table. -- The fifth and sixth statements have the same effect. Expressions (such as `2+2`, `2*2`) can be used in the statement. The result of the expression will be computed during the execution of the statement and then overwritten into the `test` table. 
After the overwrite is successful, there are two rows of data in the `test` table. -- The seventh and eighth statements have the same effect. The unspecified column `c2` will be overwritten with the default value 4. After the overwrite is successful, there are two rows of data in the `test` table. - -2. Overwrite the `test` table in the form of a query statement. The data format of the `test2` table and the `test` table must be consistent. If they are not consistent, implicit data type conversion will be triggered. - - ```sql - INSERT OVERWRITE table test SELECT * FROM test2; - INSERT OVERWRITE table test (c1, c2) SELECT * from test2; - ``` - -- The first and second statements have the same effect. The purpose of these statements is to take data from the `test2` table and overwrite the `test` table with the taken data. After the overwrite is successful, the data in the `test` table will be consistent with the data in the `test2` table. - -3. Overwrite the `test` table and specify a label. - - ```sql - INSERT OVERWRITE table test WITH LABEL `label1` SELECT * FROM test2; - INSERT OVERWRITE table test WITH LABEL `label2` (c1, c2) SELECT * from test2; - ``` - -- Using a label will encapsulate this task into an **asynchronous task**. After executing the statement, the relevant operations will be executed asynchronously. Users can use the `SHOW LOAD;` command to check the status of the job imported by this `label`. It should be noted that the label is unique. - - -#### Overwrite Table Partition - -When using INSERT OVERWRITE to rewrite partitions, we actually encapsulate the following three steps into a single transaction and execute it. If it fails halfway through, the operations that have been performed will be rolled back: -1. Assuming that partition `p1` is specified to be rewritten, first create an empty temporary partition `pTMP` with the same structure as the target partition to be rewritten. -2. Write data to `pTMP`. -3. 
replace `p1` with the `pTMP` atom - -The following is examples: - -1. Overwrite partitions `P1` and `P2` of the `test` table using the form of `VALUES`. - - ```sql - // Single-row overwrite. - INSERT OVERWRITE table test PARTITION(p1,p2) VALUES (1, 2); - INSERT OVERWRITE table test PARTITION(p1,p2) (c1, c2) VALUES (1, 2); - INSERT OVERWRITE table test PARTITION(p1,p2) (c1, c2) VALUES (1, DEFAULT); - INSERT OVERWRITE table test PARTITION(p1,p2) (c1) VALUES (1); - // Multi-row overwrite. - INSERT OVERWRITE table test PARTITION(p1,p2) VALUES (1, 2), (4, 2 + 2); - INSERT OVERWRITE table test PARTITION(p1,p2) (c1, c2) VALUES (1, 2), (4, 2 * 2); - INSERT OVERWRITE table test PARTITION(p1,p2) (c1, c2) VALUES (1, DEFAULT), (4, DEFAULT); - INSERT OVERWRITE table test PARTITION(p1,p2) (c1) VALUES (1), (4); - ``` - - Unlike overwriting an entire table, the above statements are overwriting partitions in the table. Partitions can be overwritten one at a time or multiple partitions can be overwritten at once. It should be noted that only data that satisfies the corresponding partition filtering condition can be overwritten successfully. If there is data in the overwritten data that does not satisfy any of the partitions, the overwrite will fail. An example of a failure is shown below. - - ```sql - INSERT OVERWRITE table test PARTITION(p1,p2) VALUES (7, 2); - ``` - - The data overwritten by the above statements (`c1=7`) does not satisfy the conditions of partitions `P1` and `P2`, so the overwrite will fail. - -2. Overwrite partitions `P1` and `P2` of the `test` table in the form of a query statement. The data format of the `test2` table and the `test` table must be consistent. If they are not consistent, implicit data type conversion will be triggered. - - ```sql - INSERT OVERWRITE table test PARTITION(p1,p2) SELECT * FROM test2; - INSERT OVERWRITE table test PARTITION(p1,p2) (c1, c2) SELECT * from test2; - ``` - -3. 
Overwrite partitions `P1` and `P2` of the `test` table and specify a label. - - ```sql - INSERT OVERWRITE table test PARTITION(p1,p2) WITH LABEL `label3` SELECT * FROM test2; - INSERT OVERWRITE table test PARTITION(p1,p2) WITH LABEL `label4` (c1, c2) SELECT * from test2; - ``` - - -#### Overwrite Auto Detect Partition - -When the PARTITION clause specified by the INSERT OVERWRITE command is `PARTITION(*)`, this overwrite will automatically detect the partition where the data is located. Example: - -```sql -mysql> create table test( - -> k0 int null - -> ) - -> partition by range (k0) - -> ( - -> PARTITION p10 values less than (10), - -> PARTITION p100 values less than (100), - -> PARTITION pMAX values less than (maxvalue) - -> ) - -> DISTRIBUTED BY HASH(`k0`) BUCKETS 1 - -> properties("replication_num" = "1"); -Query OK, 0 rows affected (0.11 sec) - -mysql> insert into test values (1), (2), (15), (100), (200); -Query OK, 5 rows affected (0.29 sec) - -mysql> select * from test order by k0; -+------+ -| k0 | -+------+ -| 1 | -| 2 | -| 15 | -| 100 | -| 200 | -+------+ -5 rows in set (0.23 sec) - -mysql> insert overwrite table test partition(*) values (3), (1234); -Query OK, 2 rows affected (0.24 sec) - -mysql> select * from test order by k0; -+------+ -| k0 | -+------+ -| 3 | -| 15 | -| 1234 | -+------+ -3 rows in set (0.20 sec) -``` - -As you can see, all data in partitions `p10` and `pMAX`, where data 3 and 1234 are located, are overwritten, while partition `p100` remains unchanged. This operation can be interpreted as syntactic sugar for specifying a specific partition to be overwritten by the PARTITION clause during an INSERT OVERWRITE operation, which is implemented in the same way as [specify a partition to overwrite](#overwrite-table-partition). The `PARTITION(*)` syntax eliminates the need to manually fill in all the partition names when overwriting a large number of partitions. 
- -### Keywords - - INSERT OVERWRITE, OVERWRITE, AUTO DETECT diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md deleted file mode 100644 index 4346aede1a5924..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/INSERT.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -{ - "title": "INSERT", - "language": "en" -} ---- - - - -## INSERT - -### Name - -INSERT - -### Description - -The change statement is to complete the data insertion operation. - -```sql -INSERT INTO table_name - [ PARTITION (p1, ...) ] - [ WITH LABEL label] - [ (column [, ...]) ] - [ [ hint [, ...] ] ] - { VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | query } -```` - - Parameters - -> tablet_name: The destination table for importing data. Can be of the form `db_name.table_name` -> -> partitions: Specify the partitions to be imported, which must be partitions that exist in `table_name`. Multiple partition names are separated by commas -> -> label: specify a label for the Insert task -> -> column_name: The specified destination column, must be a column that exists in `table_name` -> -> expression: the corresponding expression that needs to be assigned to a column -> -> DEFAULT: let the corresponding column use the default value -> -> query: a common query, the result of the query will be written to the target -> -> hint: some indicator used to indicate the execution behavior of `INSERT`. You can choose one of this values: `/*+ STREAMING */`, `/*+ SHUFFLE */` or `/*+ NOSHUFFLE */. -> 1. STREAMING: At present, it has no practical effect and is only reserved for compatibility with previous versions. (In the previous version, adding this hint would return a label, but now it defaults to returning a label) -> 2. SHUFFLE: When the target table is a partition table, enabling this hint will do repartiiton. -> 3. 
NOSHUFFLE: Even if the target table is a partition table, repartiiton will not be performed, but some other operations will be performed to ensure that the data is correctly dropped into each partition. - -For a Unique table with merge-on-write enabled, you can also perform partial columns updates using the insert statement. To perform partial column updates with the insert statement, you need to set the session variable enable_unique_key_partial_update to true (the default value for this variable is false, meaning partial columns updates with the insert statement are not allowed by default). When performing partial columns updates, the columns being inserted must contain at least all the Key columns and specify the columns you want to update. If the Key column values for the inserted row already exist in the original table, the data in the row with the same key column values will be updated. If the Key column values for the inserted row do not exist in the original table, a new row will be inserted into the table. In this case, columns not specified in the insert statement must either have default values or be nullable. These missing columns will first attempt to be populated with default values, and if a column has no default value, it will be filled with null. If a column cannot be null, the insert operation will fail. - -Please note that the default value of the session variable `enable_insert_strict`, which controls whether the insert statement operates in strict mode, is true. In other words, the insert statement is in strict mode by default, and in this mode, updating non-existing keys in partial column updates is not allowed. Therefore, when using the insert statement for partial columns update and wishing to insert non-existing keys, you need to set `enable_unique_key_partial_update` to true and simultaneously set `enable_insert_strict` to false. 
- -Notice: - -When executing the `INSERT` statement, the default behavior is to filter the data that does not conform to the target table format, such as the string is too long. However, for business scenarios that require data not to be filtered, you can set the session variable `enable_insert_strict` to `true` to ensure that `INSERT` will not be executed successfully when data is filtered out. - -### Example - -The `test` table contains two columns `c1`, `c2`. - -1. Import a row of data into the `test` table - -```sql -INSERT INTO test VALUES (1, 2); -INSERT INTO test (c1, c2) VALUES (1, 2); -INSERT INTO test (c1, c2) VALUES (1, DEFAULT); -INSERT INTO test (c1) VALUES (1); -```` - -The first and second statements have the same effect. When no target column is specified, the column order in the table is used as the default target column. -The third and fourth statements express the same meaning, use the default value of the `c2` column to complete the data import. - -2. Import multiple rows of data into the `test` table at one time - -```sql -INSERT INTO test VALUES (1, 2), (3, 2 + 2); -INSERT INTO test (c1, c2) VALUES (1, 2), (3, 2 * 2); -INSERT INTO test (c1) VALUES (1), (3); -INSERT INTO test (c1, c2) VALUES (1, DEFAULT), (3, DEFAULT); -```` - -The first and second statements have the same effect, import two pieces of data into the `test` table at one time -The effect of the third and fourth statements is known, and the default value of the `c2` column is used to import two pieces of data into the `test` table - -3. Import a query result into the `test` table - -```sql -INSERT INTO test SELECT * FROM test2; -INSERT INTO test (c1, c2) SELECT * from test2; -```` - -4. Import a query result into the `test` table, specifying the partition and label - -```sql -INSERT INTO test PARTITION(p1, p2) WITH LABEL `label1` SELECT * FROM test2; -INSERT INTO test WITH LABEL `label1` (c1, c2) SELECT * from test2; -```` - - -### Keywords - - INSERT - -### Best Practice - -1. 
View the returned results - - The INSERT operation is a synchronous operation, and the return of the result indicates the end of the operation. Users need to perform corresponding processing according to the different returned results. - - 1. The execution is successful, the result set is empty - - If the result set of the insert corresponding to the select statement is empty, it will return as follows: - - ```sql - mysql> insert into tbl1 select * from empty_tbl; - Query OK, 0 rows affected (0.02 sec) - ``` - - `Query OK` indicates successful execution. `0 rows affected` means that no data was imported. - - 2. The execution is successful, the result set is not empty - - In the case where the result set is not empty. The returned results are divided into the following situations: - - 1. Insert executes successfully and is visible: - - ```sql - mysql> insert into tbl1 select * from tbl2; - Query OK, 4 rows affected (0.38 sec) - {'label':'insert_8510c568-9eda-4173-9e36-6adc7d35291c', 'status':'visible', 'txnId':'4005'} - - mysql> insert into tbl1 with label my_label1 select * from tbl2; - Query OK, 4 rows affected (0.38 sec) - {'label':'my_label1', 'status':'visible', 'txnId':'4005'} - - mysql> insert into tbl1 select * from tbl2; - Query OK, 2 rows affected, 2 warnings (0.31 sec) - {'label':'insert_f0747f0e-7a35-46e2-affa-13a235f4020d', 'status':'visible', 'txnId':'4005'} - - mysql> insert into tbl1 select * from tbl2; - Query OK, 2 rows affected, 2 warnings (0.31 sec) - {'label':'insert_f0747f0e-7a35-46e2-affa-13a235f4020d', 'status':'committed', 'txnId':'4005'} - ```` - - `Query OK` indicates successful execution. `4 rows affected` means that a total of 4 rows of data were imported. `2 warnings` indicates the number of lines to be filtered. 
- - Also returns a json string: - - ````json - {'label':'my_label1', 'status':'visible', 'txnId':'4005'} - {'label':'insert_f0747f0e-7a35-46e2-affa-13a235f4020d', 'status':'committed', 'txnId':'4005'} - {'label':'my_label1', 'status':'visible', 'txnId':'4005', 'err':'some other error'} - ```` - - `label` is a user-specified label or an automatically generated label. Label is the ID of this Insert Into import job. Each import job has a unique Label within a single database. - - `status` indicates whether the imported data is visible. Show `visible` if visible, `committed` if not visible. - - `txnId` is the id of the import transaction corresponding to this insert. - - The `err` field shows some other unexpected errors. - - When you need to view the filtered rows, the user can pass the following statement - - ```sql - show load where label="xxx"; - ```` - - The URL in the returned result can be used to query the wrong data. For details, see the summary of **Viewing Error Lines** later. - - **Invisibility of data is a temporary state, this batch of data will eventually be visible** - - You can view the visible status of this batch of data with the following statement: - - ```sql - show transaction where id=4005; - ```` - - If the `TransactionStatus` column in the returned result is `visible`, the representation data is visible. - - 3. Execution failed - - Execution failure indicates that no data was successfully imported, and the following is returned: - - ```sql - mysql> insert into tbl1 select * from tbl2 where k1 = "a"; - ERROR 1064 (HY000): all partitions have no load data. url: http://10.74.167.16:8042/api/_load_error_log?file=__shard_2/error_log_insert_stmt_ba8bb9e158e4879-ae8de8507c0bf8a2_ba8bb9e158e4879_ae8de8507c0 - ``` - - Where `ERROR 1064 (HY000): all partitions have no load data` shows the reason for the failure. 
The following url can be used to query the wrong data: - - ```sql - show load warnings on "url"; - ```` - - You can view the specific error line. - -2. Timeout time - - - The timeout for INSERT operations is controlled by [session variable](../../../../advanced/variables.md) `insert_timeout`. The default is 4 hours. If it times out, the job will be canceled. - -3. Label and atomicity - - The INSERT operation also guarantees the atomicity of imports, see the [Import Transactions and Atomicity](../../../../data-operate/import/import-scenes/load-atomicity.md) documentation. - - When using `CTE(Common Table Expressions)` as the query part in an insert operation, the `WITH LABEL` and `column` parts must be specified. - -4. Filter Threshold - - Unlike other import methods, INSERT operations cannot specify a filter threshold (`max_filter_ratio`). The default filter threshold is 1, which means that rows with errors can be ignored. - - For business scenarios that require data not to be filtered, you can set [session variable](../../../../advanced/variables.md) `enable_insert_strict` to `true` to ensure that when there is data When filtered out, `INSERT` will not be executed successfully. - -5. Performance issues - - There is no single row insertion using the `VALUES` method. If you must use it this way, combine multiple rows of data into one INSERT statement for bulk commit. diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/SELECT.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/SELECT.md deleted file mode 100644 index 4755b606967aca..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/SELECT.md +++ /dev/null @@ -1,414 +0,0 @@ ---- -{ - "title": "SELECT", - "language": "en" -} - ---- - - - -## SELECT - -### Name - -SELECT - -### description - -Mainly introduces the use of Select syntax - -grammar: - -```sql -SELECT - [hint_statement, ...] 
- [ALL | DISTINCT | DISTINCTROW | ALL EXCEPT ( col_name1 [, col_name2, col_name3, ...] )] - select_expr [, select_expr ...] - [FROM table_references - [PARTITION partition_list] - [TABLET tabletid_list] - [TABLESAMPLE sample_value [ROWS | PERCENT] - [REPEATABLE pos_seek]] - [WHERE where_condition] - [GROUP BY [GROUPING SETS | ROLLUP | CUBE] {col_name | expr | position}] - [HAVING where_condition] - [ORDER BY {col_name | expr | position} - [ASC | DESC], ...] - [LIMIT {[offset,] row_count | row_count OFFSET offset}] - [INTO OUTFILE 'file_name'] -``` - -1. **Syntax Description:** - - 1. select_expr, ... Columns retrieved and displayed in the result, when using an alias, as is optional. - - 2. select_expr, ... Retrieved target table (one or more tables (including temporary tables generated by subqueries) - - 3. where_definition retrieves the condition (expression), if there is a WHERE clause, the condition filters the row data. where_condition is an expression that evaluates to true for each row to be selected. Without the WHERE clause, the statement selects all rows. In WHERE expressions, you can use any MySQL supported functions and operators except aggregate functions - - 4. `ALL | DISTINCT ` : to refresh the result set, all is all, distinct/distinctrow will refresh the duplicate columns, the default is all - - 5. `ALL EXCEPT`: Filter on the full (all) result set, except specifies the name of one or more columns to be excluded from the full result set. All matching column names will be ignored in the output. - - 6. `INTO OUTFILE 'file_name' ` : save the result to a new file (which did not exist before), the difference lies in the save format. - - 7. `Group by having`: Group the result set, and brush the result of group by when having appears. `Grouping Sets`, `Rollup`, `Cube` are extensions of group by, please refer to [GROUPING SETS DESIGN](https://doris.apache.org/community/design/grouping_sets_design) for details. - - 8. 
`Order by`: Sort the final result, Order by sorts the result set by comparing the size of one or more columns. - - Order by is a time-consuming and resource-intensive operation, because all data needs to be sent to 1 node before it can be sorted, and the sorting operation requires more memory than the non-sorting operation. - - If you need to return the top N sorted results, you need to use the LIMIT clause; in order to limit memory usage, if the user does not specify the LIMIT clause, the first 65535 sorted results are returned by default. - - 9. `Limit n`: limit the number of lines in the output result, `limit m,n` means output n records starting from the mth line.You should use `order by` before you use `limit m,n`, otherwise the data may be inconsistent each time it is executed. - - 10. The `Having` clause does not filter the row data in the table, but filters the results produced by the aggregate function. - - Typically `having` is used with aggregate functions (eg :`COUNT(), SUM(), AVG(), MIN(), MAX()`) and `group by` clauses. - - 11. SELECT supports explicit partition selection using PARTITION containing a list of partitions or subpartitions (or both) following the name of the table in `table_reference` - - 12. `[TABLET tids] TABLESAMPLE n [ROWS | PERCENT] [REPEATABLE seek]`: Limit the number of rows read from the table in the FROM clause, select a number of Tablets pseudo-randomly from the table according to the specified number of rows or percentages, and specify the number of seeds in REPEATABLE to return the selected samples again. In addition, you can also manually specify the TableID, Note that this can only be used for OLAP tables. - - 13. `hint_statement`: hint in front of the selectlist indicates that hints can be used to influence the behavior of the optimizer in order to obtain the desired execution plan. 
Details refer to [joinHint using document] (https://doris.apache.org/en/docs/query-acceleration/hint/joinHint.md) - -**Syntax constraints:** - -1. SELECT can also be used to retrieve calculated rows without referencing any table. -2. All clauses must be ordered strictly according to the above format, and a HAVING clause must be placed after the GROUP BY clause and before the ORDER BY clause. -3. The alias keyword AS is optional. Aliases can be used for group by, order by and having -4. Where clause: The WHERE statement is executed to determine which rows should be included in the GROUP BY section, and HAVING is used to determine which rows in the result set should be used. -5. The HAVING clause can refer to the total function, but the WHERE clause cannot refer to, such as count, sum, max, min, avg, at the same time, the where clause can refer to other functions except the total function. Column aliases cannot be used in the Where clause to define conditions. -6. Group by followed by with rollup can count the results one or more times. - -**Join query:** - -Doris supports JOIN syntax - -```sql -JOIN -table_references: - table_reference [, table_reference] … -table_reference: - table_factor - | join_table -table_factor: - tbl_name [[AS] alias] - [{USE|IGNORE|FORCE} INDEX (key_list)] - | ( table_references ) - | { OJ table_reference LEFT OUTER JOIN table_reference - ON conditional_expr } -join_table: - table_reference [INNER | CROSS] JOIN table_factor [join_condition] - | table_reference LEFT [OUTER] JOIN table_reference join_condition - | table_reference NATURAL [LEFT [OUTER]] JOIN table_factor - | table_reference RIGHT [OUTER] JOIN table_reference join_condition - | table_reference NATURAL [RIGHT [OUTER]] JOIN table_factor -join_condition: - ON conditional_expr -``` - -**UNION Grammar:** - -```sql -SELECT ... -UNION [ALL| DISTINCT] SELECT ...... -[UNION [ALL| DISTINCT] SELECT ...] 
-``` - -`UNION` is used to combine the results of multiple `SELECT` statements into a single result set. - -The column names in the first `SELECT` statement are used as the column names in the returned results. The selected columns listed in the corresponding position of each `SELECT` statement should have the same data type. (For example, the first column selected by the first statement should be of the same type as the first column selected by other statements.) - -The default behavior of `UNION` is to remove duplicate rows from the result. The optional `DISTINCT` keyword has no effect other than the default, since it also specifies duplicate row removal. With the optional `ALL` keyword, no duplicate row removal occurs, and the result includes all matching rows in all `SELECT` statements - -**WITH statement**: - -To specify common table expressions, use the `WITH` clause with one or more comma-separated clauses. Each subclause provides a subquery that generates the result set and associates the name with the subquery. The following example defines `WITH` clauses in CTEs named `cte1` and `cte2`, and refers to the `WITH` clause below their top-level `SELECT`: - -```sql -WITH - cte1 AS (SELECT a,b FROM table1), - cte2 AS (SELECT c,d FROM table2) -SELECT b,d FROM cte1 JOIN cte2 -WHERE cte1.a = cte2.c; -``` - -In a statement containing the `WITH` clause, each CTE name can be referenced to access the corresponding CTE result set. - -CTE names can be referenced in other CTEs, allowing CTEs to be defined based on other CTEs. - -Recursive CTE is currently not supported. - -### example - -1. Query the names of students whose ages are 18, 20, 25 - - ```sql - select Name from student where age in (18,20,25); - ``` -2. ALL EXCEPT Example - ```sql - -- Query all information except the students' age - select * except(age) from student; - ``` - -3. 
GROUP BY Example - - ```sql - --Query the tb_book table, group by type, and find the average price of each type of book, - select type,avg(price) from tb_book group by type; - ``` - -4. DISTINCT Use - - ``` - --Query the tb_book table to remove duplicate type data - select distinct type from tb_book; - ``` - -5. ORDER BY Example - - Sort query results in ascending (default) or descending (DESC) order. Ascending NULL is first, descending NULL is last - - ```sql - --Query all records in the tb_book table, sort them in descending order by id, and display three records - select * from tb_book order by id desc limit 3; - ``` - -6. LIKE fuzzy query - - Can realize fuzzy query, it has two wildcards: `%` and `_`, `%` can match one or more characters, `_` can match one character - - ``` - --Find all books whose second character is h - select * from tb_book where name like('_h%'); - ``` - -7. LIMIT limits the number of result rows - - ```sql - --1. Display 3 records in descending order - select * from tb_book order by price desc limit 3; - - --2. Display 4 records from id=1 - select * from tb_book where id limit 1,4; - ``` - -8. CONCAT join multiple columns - - ```sql - --Combine name and price into a new string output - select id,concat(name,":",price) as info,type from tb_book; - ``` - -9. Using functions and expressions - - ```sql - --Calculate the total price of various books in the tb_book table - select sum(price) as total,type from tb_book group by type; - --20% off price - select *,(price * 0.8) as "20%" from tb_book; - ``` - -10. UNION Example - - ```sql - SELECT a FROM t1 WHERE a = 10 AND B = 1 ORDER by LIMIT 10 - UNION - SELECT a FROM t2 WHERE a = 11 AND B = 2 ORDER by LIMIT 10; - ``` - -11. WITH clause example - - ```sql - WITH cte AS - ( - SELECT 1 AS col1, 2 AS col2 - UNION ALL - SELECT 3, 4 - ) - SELECT col1, col2 FROM cte; - ``` - -12. 
JOIN Exampel - - ```sql - SELECT * FROM t1 LEFT JOIN (t2, t3, t4) - ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c) - ``` - - Equivalent to - - ```sql - SELECT * FROM t1 LEFT JOIN (t2 CROSS JOIN t3 CROSS JOIN t4) - ON (t2.a = t1.a AND t3.b = t1.b AND t4.c = t1.c) - ``` - -13. INNER JOIN - - ```sql - SELECT t1.name, t2.salary - FROM employee AS t1 INNER JOIN info AS t2 ON t1.name = t2.name; - - SELECT t1.name, t2.salary - FROM employee t1 INNER JOIN info t2 ON t1.name = t2.name; - ``` - -14. LEFT JOIN - - ```sql - SELECT left_tbl.* - FROM left_tbl LEFT JOIN right_tbl ON left_tbl.id = right_tbl.id - WHERE right_tbl.id IS NULL; - ``` - -15. RIGHT JOIN - - ```sql - mysql> SELECT * FROM t1 RIGHT JOIN t2 ON (t1.a = t2.a); - +------+------+------+------+ - | a | b | a | c | - +------+------+------+------+ - | 2 | y | 2 | z | - | NULL | NULL | 3 | w | - +------+------+------+------+ - ``` - -16. TABLESAMPLE - - ```sql - --Pseudo-randomly sample 1000 rows in t1. Note that several Tablets are actually selected according to the statistics of the table, and the total number of selected Tablet rows may be greater than 1000, so if you want to explicitly return 1000 rows, you need to add Limit. - SELECT * FROM t1 TABLET(10001) TABLESAMPLE(1000 ROWS) REPEATABLE 2 limit 1000; - ``` - -### keywords - - SELECT - -### Best Practice - -1. ome additional knowledge about the SELECT clause - - - An alias can be specified for select_expr using AS alias_name. Aliases are used as column names in expressions and can be used in GROUP BY, ORDER BY or HAVING clauses. The AS keyword is a good habit to use when specifying aliases for columns. - - - table_references after FROM indicates one or more tables participating in the query. If more than one table is listed, a JOIN operation is performed. 
And for each specified table, you can define an alias for it - - - The selected column after SELECT can be referenced in ORDER IN and GROUP BY by column name, column alias or integer (starting from 1) representing the column position - - ```sql - SELECT college, region, seed FROM tournament - ORDER BY region, seed; - - SELECT college, region AS r, seed AS s FROM tournament - ORDER BY r, s; - - SELECT college, region, seed FROM tournament - ORDER BY 2, 3; - ```` - - - If ORDER BY appears in a subquery and also applies to the outer query, the outermost ORDER BY takes precedence. - - - If GROUP BY is used, the grouped columns are automatically sorted in ascending order (as if there was an ORDER BY statement followed by the same columns). If you want to avoid the overhead of GROUP BY due to automatic sorting, adding ORDER BY NULL can solve it: - - ```sql - SELECT a, COUNT(b) FROM test_table GROUP BY a ORDER BY NULL; - ```` - - - - - When sorting columns in a SELECT using ORDER BY or GROUP BY, the server sorts values using only the initial number of bytes indicated by the max_sort_length system variable. - - - Having clauses are generally applied last, just before the result set is returned to the MySQL client, and is not optimized. (while LIMIT is applied after HAVING) - - The SQL standard requires: HAVING must refer to a column in the GROUP BY list or used by an aggregate function. However, MySQL extends this by allowing HAVING to refer to columns in the Select clause list, as well as columns from outer subqueries. - - A warning is generated if the column referenced by HAVING is ambiguous. In the following statement, col2 is ambiguous: - - ```sql - SELECT COUNT(col1) AS col2 FROM t GROUP BY col2 HAVING col2 = 2; - ```` - - - Remember not to use HAVING where WHERE should be used. HAVING is paired with GROUP BY. - - - The HAVING clause can refer to aggregate functions, while WHERE cannot. 
- - ```sql - SELECT user, MAX(salary) FROM users - GROUP BY user HAVING MAX(salary) > 10; - ```` - - - The LIMIT clause can be used to constrain the number of rows returned by a SELECT statement. LIMIT can have one or two arguments, both of which must be non-negative integers. - - ```sql - /*Retrieve 6~15 rows in the result set*/ - SELECT * FROM tbl LIMIT 5,10; - /*Then if you want to retrieve all rows after a certain offset is set, you can set a very large constant for the second parameter. The following query fetches all data from row 96 onwards */ - SELECT * FROM tbl LIMIT 95,18446744073709551615; - /*If LIMIT has only one parameter, the parameter specifies the number of rows that should be retrieved, and the offset defaults to 0, that is, starting from the first row*/ - ```` - - - SELECT...INTO allows query results to be written to a file - -2. Modifiers of the SELECT keyword - - - deduplication - - The ALL and DISTINCT modifiers specify whether to deduplicate rows in the result set (should not be a column). - - ALL is the default modifier, that is, all rows that meet the requirements are to be retrieved. - - DISTINCT removes duplicate rows. - -2. The main advantage of subqueries - - - Subqueries allow structured queries so that each part of a statement can be isolated. - - Some operations require complex unions and associations. Subqueries provide other ways to perform these operations - -3. Speed up queries - - - Use Doris's partition and bucket as data filtering conditions as much as possible to reduce the scope of data scanning - - Make full use of Doris's prefix index fields as data filter conditions to speed up query speed - -4. UNION - - - Using only the union keyword has the same effect as using union disitnct. Since the deduplication work is more memory-intensive, the query speed using the union all operation will be faster and the memory consumption will be less. 
If users want to perform order by and limit operations on the returned result set, they need to put the union operation in the subquery, then select from subquery, and finally put the subquery and order by outside the subquery. - - ```sql - select * from (select age from student_01 union all select age from student_02) as t1 - order by age limit 4; - - +-------------+ - | age | - +-------------+ - | 18 | - | 19 | - | 20 | - | 21 | - +-------------+ - 4 rows in set (0.01 sec) - ```` - -6. JOIN - - - In the inner join condition, in addition to supporting equal-valued joins, it also supports unequal-valued joins. For performance reasons, it is recommended to use equal-valued joins. - - Other joins only support equivalent joins - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/UPDATE.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/UPDATE.md deleted file mode 100644 index 050e59d393c91e..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/Manipulation/UPDATE.md +++ /dev/null @@ -1,167 +0,0 @@ ---- -{ - "title": "UPDATE", - "language": "en" -} ---- - - - -## UPDATE - -### Name - -UPDATE - -### Description - -This statement is used to update the data. The UPDATE statement currently only supports the UNIQUE KEY model. - -The UPDATE operation currently only supports updating the Value column. The update of the Key column can refer to [Using FlinkCDC to update Key column](../../../../ecosystem/flink-doris-connector.md#use-flinkcdc-to-update-key-column). -#### Syntax - -```sql -UPDATE target_table [table_alias] - SET assignment_list - WHERE condition - -assignment_list: - assignment [, assignment] ... 
- -assignment: - col_name = value - -value: - {expr | DEFAULT} -``` - - - -```sql -UPDATE target_table [table_alias] - SET assignment_list - [ FROM additional_tables] - WHERE condition -``` - - - -#### Required Parameters - -+ target_table: The target table of the data to be updated. Can be of the form 'db_name.table_name' -+ assignment_list: The target column to be updated, in the format 'col_name = value, col_name = value' -+ where condition: the condition that is expected to be updated, an expression that returns true or false can be - -#### Optional Parameters - - - -+ table_alias: alias of table -+ FROM additional_tables: Specifies one or more tables to use for selecting rows to update or for setting new values. Note that if you want use target table here, you should give it a alias explicitly. - - - -#### Note - -The current UPDATE statement only supports row updates on the UNIQUE KEY model. - -### Example - -The `test` table is a unique model table, which contains four columns: k1, k2, v1, v2. Where k1, k2 are keys, v1, v2 are values, and the aggregation method is Replace. - -1. Update the v1 column in the 'test' table that satisfies the conditions k1 =1 , k2 =2 to 1 - -```sql -UPDATE test SET v1 = 1 WHERE k1=1 and k2=2; -``` - -2. Increment the v1 column of the k1=1 column in the 'test' table by 1 - -```sql -UPDATE test SET v1 = v1+1 WHERE k1=1; -``` - - - -3. 
use the result of `t2` join `t3` to update `t1` - -```sql --- create t1, t2, t3 tables -CREATE TABLE t1 - (id INT, c1 BIGINT, c2 STRING, c3 DOUBLE, c4 DATE) -UNIQUE KEY (id) -DISTRIBUTED BY HASH (id) -PROPERTIES('replication_num'='1', "function_column.sequence_col" = "c4"); - -CREATE TABLE t2 - (id INT, c1 BIGINT, c2 STRING, c3 DOUBLE, c4 DATE) -DISTRIBUTED BY HASH (id) -PROPERTIES('replication_num'='1'); - -CREATE TABLE t3 - (id INT) -DISTRIBUTED BY HASH (id) -PROPERTIES('replication_num'='1'); - --- insert data -INSERT INTO t1 VALUES - (1, 1, '1', 1.0, '2000-01-01'), - (2, 2, '2', 2.0, '2000-01-02'), - (3, 3, '3', 3.0, '2000-01-03'); - -INSERT INTO t2 VALUES - (1, 10, '10', 10.0, '2000-01-10'), - (2, 20, '20', 20.0, '2000-01-20'), - (3, 30, '30', 30.0, '2000-01-30'), - (4, 4, '4', 4.0, '2000-01-04'), - (5, 5, '5', 5.0, '2000-01-05'); - -INSERT INTO t3 VALUES - (1), - (4), - (5); - --- update t1 -UPDATE t1 - SET t1.c1 = t2.c1, t1.c3 = t2.c3 * 100 - FROM t2 INNER JOIN t3 ON t2.id = t3.id - WHERE t1.id = t2.id; -``` - -the expect result is only update the row where id = 1 in table t1 - -``` -+----+----+----+--------+------------+ -| id | c1 | c2 | c3 | c4 | -+----+----+----+--------+------------+ -| 1 | 10 | 1 | 1000.0 | 2000-01-01 | -| 2 | 2 | 2 | 2.0 | 2000-01-02 | -| 3 | 3 | 3 | 3.0 | 2000-01-03 | -+----+----+----+--------+------------+ -``` - - - -### Keywords - - UPDATE - -### Best Practice - diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/OUTFILE.md b/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/OUTFILE.md deleted file mode 100644 index e0ab12ec9351fd..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Manipulation-Statements/OUTFILE.md +++ /dev/null @@ -1,408 +0,0 @@ ---- -{ - "title": "OUTFILE", - "language": "en" -} ---- - - - -## OUTFILE -### Name - -OUTFILE - -### description - -This statement is used to export query results to a file using the `SELECT INTO OUTFILE` command. 
Currently, it supports exporting to remote storage, such as HDFS, S3, BOS, COS (Tencent Cloud), through the Broker process, S3 protocol, or HDFS protocol. - -#### grammar: - -```sql -query_stmt -INTO OUTFILE "file_path" -[format_as] -[properties] -``` - -#### illustrate: - -1. file_path - - file_path points to the path where the file is stored and the file prefix. Such as `hdfs://path/to/my_file_`. - - ``` - The final filename will consist of `my_file_`, the file number and the file format suffix. The file serial number starts from 0, and the number is the number of files to be divided. Such as: - - my_file_abcdefg_0.csv - my_file_abcdefg_1.csv - my_file_abcdegf_2.csv - ``` - You can also omit the file prefix and specify only the file directory, such as: `hdfs://path/to/` - -2. format_as - - ``` - FORMAT AS CSV - ``` - - Specifies the export format. Supported formats include CSV, PARQUET, CSV_WITH_NAMES, CSV_WITH_NAMES_AND_TYPES and ORC. Default is CSV. - > Note: PARQUET, CSV_WITH_NAMES, CSV_WITH_NAMES_AND_TYPES, and ORC are supported starting in version 1.2 . - -3. properties - - Specify related properties. Currently exporting via the Broker process, S3 protocol, or HDFS protocol is supported. - - ``` - grammar: - [PROPERTIES ("key"="value", ...)] - The following properties are supported: - - File related properties - column_separator: column separator,is only for CSV format. mulit-bytes is supported starting in version 1.2, such as: "\\x01", "abc". - line_delimiter: line delimiter,is only for CSV format. mulit-bytes supported starting in version 1.2, such as: "\\x01", "abc". - max_file_size: the size limit of a single file, if the result exceeds this value, it will be cut into multiple files, the value range of max_file_size is [5MB, 2GB] and the default is 1GB. 
(When specified that the file format is ORC, the size of the actual division file will be a multiples of 64MB, such as: specify max_file_size = 5MB, and actually use 64MB as the division; specify max_file_size = 65MB, and will actually use 128MB as cut division points.) - delete_existing_files: default `false`. If it is specified as true, you will first delete all files specified in the directory specified by the file_path, and then export the data to the directory.For example: "file_path" = "/user/tmp", then delete all files and directory under "/user/"; "file_path" = "/user/tmp/", then delete all files and directory under "/user/tmp/" - file_suffix: Specify the suffix of the export file. If this parameter is not specified, the default suffix for the file format will be used. - - Broker related properties need to be prefixed with `broker.`: - broker.name: broker name - broker.hadoop.security.authentication: specify the authentication method as kerberos - broker.kerberos_principal: specifies the principal of kerberos - broker.kerberos_keytab: specifies the path to the keytab file of kerberos. The file must be the absolute path to the file on the server where the broker process is located. and can be accessed by the Broker process - - HDFS related properties: - fs.defaultFS: namenode address and port - hadoop.username: hdfs username - dfs.nameservices: if hadoop enable HA, please set fs nameservice. See hdfs-site.xml - dfs.ha.namenodes.[nameservice ID]:unique identifiers for each NameNode in the nameservice. See hdfs-site.xml - dfs.namenode.rpc-address.[nameservice ID].[name node ID]: the fully-qualified RPC address for each NameNode to listen on. 
See hdfs-site.xml - dfs.client.failover.proxy.provider.[nameservice ID]:the Java class that HDFS clients use to contact the Active NameNode, usually it is org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider - - For a kerberos-authentication enabled Hadoop cluster, additional properties need to be set: - dfs.namenode.kerberos.principal: HDFS namenode service principal - hadoop.security.authentication: kerberos - hadoop.kerberos.principal: the Kerberos pincipal that Doris will use when connectiong to HDFS. - hadoop.kerberos.keytab: HDFS client keytab location. - - For the S3 protocol, you can directly execute the S3 protocol configuration: - s3.endpoint - s3.access_key - s3.secret_key - s3.region - use_path_style: (optional) default false . The S3 SDK uses the virtual-hosted style by default. However, some object storage systems may not be enabled or support virtual-hosted style access. At this time, we can add the use_path_style parameter to force the use of path style access method. - ``` - - > Note that to use the `delete_existing_files` parameter, you also need to add the configuration `enable_delete_existing_files = true` to the fe.conf file and restart the FE. Only then will the `delete_existing_files` parameter take effect. Setting `delete_existing_files = true` is a dangerous operation and it is recommended to only use it in a testing environment. - -4. Data Types for Export - - All file formats support the export of basic data types, while only csv/orc/csv_with_names/csv_with_names_and_types currently support the export of complex data types (ARRAY/MAP/STRUCT). Nested complex data types are not supported. - -5. Concurrent Export - - Setting the session variable `set enable_parallel_outfile = true;` enables concurrent export using outfile. For detailed usage, see [Export Query Result](../../../data-operate/export/outfile.md). - -6. 
Export to Local - - To export to a local file, you need configure `enable_outfile_to_local=true` in fe.conf. - - ```sql - select * from tbl1 limit 10 - INTO OUTFILE "file:///home/work/path/result_"; - ``` - -#### DataType Mapping - -Parquet and ORC file formats have their own data types. The export function of Doris can automatically export the Doris data types to the corresponding data types of the Parquet/ORC file format. The following are the data type mapping relationship of the Doris data types and the Parquet/ORC file format data types: - -1. The mapping relationship between the Doris data types to the ORC data types is: - - | Doris Type | Orc Type | - | --- | --- | - | boolean | boolean | - | tinyint | tinyint | - | smallint | smallint | - | int | int | - | bigint | bigint | - | largeInt | string | - | date | string | - | datev2 | string | - | datetime | string | - | datetimev2 | timestamp | - | float | float | - | double | double | - | char / varchar / string | string | - | decimal | decimal | - | struct | struct | - | map | map | - | array | array | - -2. When Doris exports data to the Parquet file format, the Doris memory data will be converted to Arrow memory data format first, and then the paraquet file format is written by Arrow. The mapping relationship between the Doris data types to the ARROW data types is: - - | Doris Type | Arrow Type | - | --- | --- | - | boolean | boolean | - | tinyint | int8 | - | smallint | int16 | - | int | int32 | - | bigint | int64 | - | largeInt | utf8 | - | date | utf8 | - | datev2 | utf8 | - | datetime | utf8 | - | datetimev2 | utf8 | - | float | float32 | - | double | float64 | - | char / varchar / string | utf8 | - | decimal | decimal128 | - | struct | struct | - | map | map | - | array | list | - - - - -### example - -1. Use the broker method to export, and export the simple query results to the file `hdfs://path/to/result.txt`. Specifies that the export format is CSV. 
Use `my_broker` and set kerberos authentication information. Specify the column separator as `,` and the row separator as `\n`. - - ```sql - SELECT * FROM tbl - INTO OUTFILE "hdfs://path/to/result_" - FORMAT AS CSV - PROPERTIES - ( - "broker.name" = "my_broker", - "broker.hadoop.security.authentication" = "kerberos", - "broker.kerberos_principal" = "doris@YOUR.COM", - "broker.kerberos_keytab" = "/home/doris/my.keytab", - "column_separator" = ",", - "line_delimiter" = "\n", - "max_file_size" = "100MB" - ); - ```` - - If the final generated file is not larger than 100MB, it will be: `result_0.csv`. - If larger than 100MB, it may be `result_0.csv, result_1.csv, ...`. - -2. Export the simple query results to the file `hdfs://path/to/result.parquet`. Specify the export format as PARQUET. Use `my_broker` and set kerberos authentication information. - - ```sql - SELECT c1, c2, c3 FROM tbl - INTO OUTFILE "hdfs://path/to/result_" - FORMAT AS PARQUET - PROPERTIES - ( - "broker.name" = "my_broker", - "broker.hadoop.security.authentication" = "kerberos", - "broker.kerberos_principal" = "doris@YOUR.COM", - "broker.kerberos_keytab" = "/home/doris/my.keytab" - ); - ```` - -3. Export the query result of the CTE statement to the file `hdfs://path/to/result.txt`. The default export format is CSV. Use `my_broker` and set hdfs high availability information. Use the default row and column separators. 
- - ```sql - WITH - x1 AS - (SELECT k1, k2 FROM tbl1), - x2 AS - (SELECT k3 FROM tbl2) - SELEC k1 FROM x1 UNION SELECT k3 FROM x2 - INTO OUTFILE "hdfs://path/to/result_" - PROPERTIES - ( - "broker.name" = "my_broker", - "broker.username"="user", - "broker.password"="passwd", - "broker.dfs.nameservices" = "my_ha", - "broker.dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2", - "broker.dfs.namenode.rpc-address.my_ha.my_namenode1" = "nn1_host:rpc_port", - "broker.dfs.namenode.rpc-address.my_ha.my_namenode2" = "nn2_host:rpc_port", - "broker.dfs.client.failover.proxy.provider" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" - ); - ```` - - If the final generated file is not larger than 1GB, it will be: `result_0.csv`. - If larger than 1GB, it may be `result_0.csv, result_1.csv, ...`. - -4. Export the query result of the UNION statement to the file `bos://bucket/result.txt`. Specify the export format as PARQUET. Use `my_broker` and set hdfs high availability information. The PARQUET format does not require a column delimiter to be specified. - After the export is complete, an identity file is generated. - - ```sql - SELECT k1 FROM tbl1 UNION SELECT k2 FROM tbl1 - INTO OUTFILE "bos://bucket/result_" - FORMAT AS PARQUET - PROPERTIES - ( - "broker.name" = "my_broker", - "broker.bos_endpoint" = "http://bj.bcebos.com", - "broker.bos_accesskey" = "xxxxxxxxxxxxxxxxxxxxxxxxxxx", - "broker.bos_secret_accesskey" = "yyyyyyyyyyyyyyyyyyyyyyyyy" - ); - ```` - -5. Export the query result of the select statement to the file `s3a://${bucket_name}/path/result.txt`. Specify the export format as csv. - After the export is complete, an identity file is generated. 
- - ```sql - select k1,k2,v1 from tbl1 limit 100000 - into outfile "s3a://my_bucket/export/my_file_" - FORMAT AS CSV - PROPERTIES - ( - "broker.name" = "hdfs_broker", - "broker.fs.s3a.access.key" = "xxx", - "broker.fs.s3a.secret.key" = "xxxx", - "broker.fs.s3a.endpoint" = "https://cos.xxxxxx.myqcloud.com/", - "column_separator" = ",", - "line_delimiter" = "\n", - "max_file_size" = "1024MB", - "success_file_name" = "SUCCESS" - ) - ```` - - If the final generated file is not larger than 1GB, it will be: `my_file_0.csv`. - If larger than 1GB, it may be `my_file_0.csv, result_1.csv, ...`. - Verify on cos - - 1. A path that does not exist will be automatically created - 2. Access.key/secret.key/endpoint needs to be confirmed with students of cos. Especially the value of endpoint does not need to fill in bucket_name. - -6. Use the s3 protocol to export to bos, and enable concurrent export. - - ```sql - set enable_parallel_outfile = true; - select k1 from tb1 limit 1000 - into outfile "s3://my_bucket/export/my_file_" - format as csv - properties - ( - "s3.endpoint" = "http://s3.bd.bcebos.com", - "s3.access_key" = "xxxx", - "s3.secret_key" = "xxx", - "s3.region" = "bd" - ) - ```` - - The resulting file is prefixed with `my_file_{fragment_instance_id}_`. - -7. Use the s3 protocol to export to bos, and enable concurrent export of session variables. - Note: However, since the query statement has a top-level sorting node, even if the concurrently exported session variable is enabled for this query, it cannot be exported concurrently. - - ```sql - set enable_parallel_outfile = true; - select k1 from tb1 order by k1 limit 1000 - into outfile "s3://my_bucket/export/my_file_" - format as csv - properties - ( - "s3.endpoint" = "http://s3.bd.bcebos.com", - "s3.access_key" = "xxxx", - "s3.secret_key" = "xxx", - "s3.region" = "bd" - ) - ```` - -8. Use hdfs export to export simple query results to the file `hdfs://${host}:${fileSystem_port}/path/to/result.txt`. 
Specify the export format as CSV and the user name as work. Specify the column separator as `,` and the row separator as `\n`. - - ```sql - -- the default port of fileSystem_port is 9000 - SELECT * FROM tbl - INTO OUTFILE "hdfs://${host}:${fileSystem_port}/path/to/result_" - FORMAT AS CSV - PROPERTIES - ( - "fs.defaultFS" = "hdfs://ip:port", - "hadoop.username" = "work" - ); - ``` - - If the Hadoop cluster is highly available and Kerberos authentication is enabled, you can refer to the following SQL statement: - - ```sql - SELECT * FROM tbl - INTO OUTFILE "hdfs://path/to/result_" - FORMAT AS CSV - PROPERTIES - ( - 'fs.defaultFS'='hdfs://hacluster/', - 'dfs.nameservices'='hacluster', - 'dfs.ha.namenodes.hacluster'='n1,n2', - 'dfs.namenode.rpc-address.hacluster.n1'='192.168.0.1:8020', - 'dfs.namenode.rpc-address.hacluster.n2'='192.168.0.2:8020', - 'dfs.client.failover.proxy.provider.hacluster'='org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider', - 'dfs.namenode.kerberos.principal'='hadoop/_HOST@REALM.COM' - 'hadoop.security.authentication'='kerberos', - 'hadoop.kerberos.principal'='doris_test@REALM.COM', - 'hadoop.kerberos.keytab'='/path/to/doris_test.keytab' - ); - - If the final generated file is not larger than 100MB, it will be: `result_0.csv`. - If larger than 100MB, it may be `result_0.csv, result_1.csv, ...`. - -9. Export the query result of the select statement to the file `cosn://${bucket_name}/path/result.txt` on Tencent Cloud Object Storage (COS). Specify the export format as csv. - After the export is complete, an identity file is generated. 
- - ```sql - select k1,k2,v1 from tbl1 limit 100000 - into outfile "cosn://my_bucket/export/my_file_" - FORMAT AS CSV - PROPERTIES - ( - "broker.name" = "broker_name", - "broker.fs.cosn.userinfo.secretId" = "xxx", - "broker.fs.cosn.userinfo.secretKey" = "xxxx", - "broker.fs.cosn.bucket.endpoint_suffix" = "cos.xxxxxx.myqcloud.com", - "column_separator" = ",", - "line_delimiter" = "\n", - "max_file_size" = "1024MB", - "success_file_name" = "SUCCESS" - ) - ```` - -### keywords - -OUTFILE - -### Best Practice - -1. Export data volume and export efficiency - - This function essentially executes an SQL query command. The final result is a single-threaded output. Therefore, the time-consuming of the entire export includes the time-consuming of the query itself and the time-consuming of writing the final result set. If the query is large, you need to set the session variable `query_timeout` to appropriately extend the query timeout. - -2. Management of export files - - Doris does not manage exported files. Including the successful export, or the remaining files after the export fails, all need to be handled by the user. - -3. Export to local file - - The ability to export to a local file is not available for public cloud users, only for private deployments. And the default user has full control over the cluster nodes. Doris will not check the validity of the export path filled in by the user. If the process user of Doris does not have write permission to the path, or the path does not exist, an error will be reported. At the same time, for security reasons, if a file with the same name already exists in this path, the export will also fail. - - Doris does not manage files exported locally, nor does it check disk space, etc. These files need to be managed by the user, such as cleaning and so on. - -4. 
Results Integrity Guarantee - - This command is a synchronous command, so it is possible that the task connection is disconnected during the execution process, so that it is impossible to live the exported data whether it ends normally, or whether it is complete. At this point, you can use the `success_file_name` parameter to request that a successful file identifier be generated in the directory after the task is successful. Users can use this file to determine whether the export ends normally. - -5. Other Points to Note - - See [Export Query Result](../../../data-operate/export/outfile.md) \ No newline at end of file diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Types/AGG_STATE.md b/docs/en/docs/sql-manual/sql-reference/Data-Types/AGG_STATE.md deleted file mode 100644 index 9dea25a52a5e04..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Types/AGG_STATE.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -{ - "title": "AGG_STATE", - "language": "en" -} ---- - - - -## AGG_STATE -### description - AGG_STATE cannot be used as a key column, and the signature of the aggregation function must be declared at the same time when creating the table. - User does not need to specify length and default value. The actual stored data size is related to the function implementation. - - AGG_STATE can only be used with [state](../../sql-functions/combinators/state.md) - /[merge](../../sql-functions/combinators/merge.md)/[union](../..//sql-functions/combinators/union.md) function combiner usage. - - It should be noted that the signature of the aggregation function is also part of the type, and agg_state with different signatures cannot be mixed. For example, if the signature of the table creation statement is `max_by(int,int)`, then `max_by(bigint,int)` or `group_concat(varchar)` cannot be inserted. - The nullable attribute here is also part of the signature. 
If you can confirm that you will not enter a null value, you can declare the parameter as not null, which can obtain a smaller storage size and reduce serialization/deserialization overhead. - -### example - -Create table example: -```sql - create table a_table( - k1 int null, - k2 agg_state generic, - k3 agg_state generic - ) - aggregate key (k1) - distributed BY hash(k1) buckets 3 - properties("replication_num" = "1"); -``` -Here k2 and k3 use max_by and group_concat as aggregation types respectively. - -Insert data example: -```sql - insert into a_table values(1,max_by_state(3,1),group_concat_state('a')); - insert into a_table values(1,max_by_state(2,2),group_concat_state('bb')); - insert into a_table values(2,max_by_state(1,3),group_concat_state('ccc')); -``` -For the agg_state column, the insert statement must use the [state](../../sql-functions/combinators/state.md) function to generate the corresponding agg_state data, where the functions and input parameter types must completely correspond to agg_state. - -Select data example: -```sql - mysql [test]>select k1,max_by_merge(k2),group_concat_merge(k3) from a_table group by k1 order by k1; - +------+--------------------+--------------------------+ - | k1 | max_by_merge(`k2`) | group_concat_merge(`k3`) | - +------+--------------------+--------------------------+ - | 1 | 2 | bb,a | - | 2 | 1 | ccc | - +------+--------------------+--------------------------+ -``` -If you need to get the actual result, you need to use the corresponding [merge](../../sql-functions/combinators/merge.md) function. 
- -```sql - mysql [test]>select max_by_merge(u2),group_concat_merge(u3) from ( - select k1,max_by_union(k2) as u2,group_concat_union(k3) u3 from a_table group by k1 order by k1 - ) t; - +--------------------+--------------------------+ - | max_by_merge(`u2`) | group_concat_merge(`u3`) | - +--------------------+--------------------------+ - | 1 | ccc,bb,a | - +--------------------+--------------------------+ -``` -If you want to aggregate only the agg_state without getting the actual result during the process, you can use the [union](../..//sql-functions/combinators/union.md) function. - -For more examples, see [datatype_p0/agg_state](https://github.com/apache/doris/tree/master/regression-test/suites/datatype_p0/agg_state) -### keywords - - AGG_STATE diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Types/ARRAY.md b/docs/en/docs/sql-manual/sql-reference/Data-Types/ARRAY.md deleted file mode 100644 index 49ae2fc28f1b67..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Types/ARRAY.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -{ - "title": "ARRAY", - "language": "en" -} ---- - - - -## ARRAY - - - -ARRAY - - - -### description - -`ARRAY` - -An array of T-type items, it cannot be used as a key column. Now ARRAY can only used in Duplicate Model Tables. - - - -After version 2.0, it supports the use of non-key columns in Unique model tables. 
- - - -T-type could be any of: - -``` -BOOLEAN, TINYINT, SMALLINT, INT, BIGINT, LARGEINT, FLOAT, DOUBLE, DECIMAL, DATE, -DATEV2, DATETIME, DATETIMEV2, CHAR, VARCHAR, STRING -``` - -### example - -Create table example: - -``` -mysql> CREATE TABLE `array_test` ( - `id` int(11) NULL COMMENT "", - `c_array` ARRAY NULL COMMENT "" -) ENGINE=OLAP -DUPLICATE KEY(`id`) -COMMENT "OLAP" -DISTRIBUTED BY HASH(`id`) BUCKETS 1 -PROPERTIES ( -"replication_allocation" = "tag.location.default: 1", -"in_memory" = "false", -"storage_format" = "V2" -); -``` - -Insert data example: - -``` -mysql> INSERT INTO `array_test` VALUES (1, [1,2,3,4,5]); -mysql> INSERT INTO `array_test` VALUES (2, [6,7,8]), (3, []), (4, null); -``` - -Select data example: - -``` -mysql> SELECT * FROM `array_test`; -+------+-----------------+ -| id | c_array | -+------+-----------------+ -| 1 | [1, 2, 3, 4, 5] | -| 2 | [6, 7, 8] | -| 3 | [] | -| 4 | NULL | -+------+-----------------+ -``` - -### keywords - - ARRAY diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Types/BIGINT.md b/docs/en/docs/sql-manual/sql-reference/Data-Types/BIGINT.md deleted file mode 100644 index 5ceab3c35d3ec0..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Types/BIGINT.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -{ - "title": "BIGINT", - "language": "en" -} ---- - - - -## BIGINT -### Description -BIGINT -8-byte signed integer, range [-9223372036854775808, 9223372036854775807] - -### keywords -BIGINT diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Types/BITMAP.md b/docs/en/docs/sql-manual/sql-reference/Data-Types/BITMAP.md deleted file mode 100644 index cc26c84db179c6..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Types/BITMAP.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -{ - "title": "BITMAP", - "language": "en" -} ---- - - - -## BITMAP -### Description -BITMAP - -The columns of the BITMAP type can be used in Aggregate table, Unique table or Duplicate table. 
-When used in a Unique table or Duplicate table, they must be used as non-key columns. -When used in an Aggregate table, they must be used as non-key columns, and the aggregation type is BITMAP_UNION when building the table. -The user does not need to specify the length and default value. The length is controlled within the system according to the degree of data aggregation. -And the BITMAP column can only be queried or used by supporting functions such as bitmap_union_count, bitmap_union, bitmap_hash and bitmap_hash64. - -The use of BITMAP in offline scenarios will affect the import speed. In the case of a large amount of data, the query speed will be slower than HLL and better than Count Distinct. -Note: If BITMAP does not use a global dictionary in real-time scenarios, using bitmap_hash() may cause an error of about one-thousandth. If the error rate is not tolerable, bitmap_hash64 can be used instead. - -### example - -Create table example: - - create table metric_table ( - datekey int, - hour int, - device_id bitmap BITMAP_UNION - ) - aggregate key (datekey, hour) - distributed by hash(datekey, hour) buckets 1 - properties( - "replication_num" = "1" - ); - -Insert data example: - - insert into metric_table values - (20200622, 1, to_bitmap(243)), - (20200622, 2, bitmap_from_array([1,2,3,4,5,434543])), - (20200622, 3, to_bitmap(287667876573)); - -Query data example: - - select hour, BITMAP_UNION_COUNT(pv) over(order by hour) uv from( - select hour, BITMAP_UNION(device_id) as pv - from metric_table -- Query the accumulated UV per hour - where datekey=20200622 - group by hour order by 1 - ) final; - -When querying, BITMAP can cooperate with `return_object_data_as_binary`. For details, please refer to [variables](../../../advanced/variables.md). 
- -### keywords -BITMAP diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Types/BOOLEAN.md b/docs/en/docs/sql-manual/sql-reference/Data-Types/BOOLEAN.md deleted file mode 100644 index d678a548e2cc94..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Types/BOOLEAN.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -{ - "title": "BOOLEAN", - "language": "en" -} ---- - - - -## BOOLEAN -### Description -BOOL, BOOLEAN -Like TINYINT, 0 stands for false and 1 for true. - -### keywords -BOOLEAN diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Types/CHAR.md b/docs/en/docs/sql-manual/sql-reference/Data-Types/CHAR.md deleted file mode 100644 index d876498a4b4af3..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Types/CHAR.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -{ - "title": "CHAR", - "language": "en" -} ---- - - - -## CHAR -### Description -CHAR(M) -A fixed-length string, M represents the byte length of a fixed-length string. The range of M is 1-255. - -### keywords -CHAR diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Types/DATE.md b/docs/en/docs/sql-manual/sql-reference/Data-Types/DATE.md deleted file mode 100644 index f7767a34985488..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Types/DATE.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -{ - "title": "DATE", - "language": "en" -} ---- - - - -## DATE - - - -DATE - - - -### Description -#### Syntax -date -Date type, the current range of values is ['0000-01-01','9999-12-31'], and the default print form is 'yyyy-MM-dd'. 
- -### example -``` -SELECT DATE('2003-12-31 01:02:03'); -+-----------------------------+ -| DATE('2003-12-31 01:02:03') | -+-----------------------------+ -| 2003-12-31 | -+-----------------------------+ -``` - -### keywords -DATE diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Types/DATETIME.md b/docs/en/docs/sql-manual/sql-reference/Data-Types/DATETIME.md deleted file mode 100644 index 609ca416f1dcd0..00000000000000 --- a/docs/en/docs/sql-manual/sql-reference/Data-Types/DATETIME.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -{ - "title": "DATETIME", - "language": "en" -} ---- - - - -## DATETIME - - - -DATETIME - - - -### Description - -DATETIME([P]) -Date and time type. -The optional parameter P indicates the time precision and the value range is [0, 6], that is, it supports up to 6 decimal places (microseconds). 0 when not set. -Value range is ['0000-01-01 00:00:00[.000000]','9999-12-31 23:59:59[.999999]']. -The form of printing is 'yyyy-MM-dd HH:mm:ss.SSSSSS' - -### note - -DATETIME supports temporal precision up to microseconds. When parsing imported DATETIME type data using the BE side (e.g. using Stream load, Spark load, etc.), or using the FE side with the [Nereids](../../../query-acceleration/nereids) on, decimals exceeding the current precision will be **rounded**. -DATETIME reads support resolving the time zone in the format of the original DATETIME literal followed by the time zone: -```sql -