diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..8a62d874 --- /dev/null +++ b/404.html @@ -0,0 +1,2453 @@ + + + + + + + + + + + + + + + + + + + SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Autonomously scale Compute, Storage/autonomous.png b/Autonomously scale Compute, Storage/autonomous.png new file mode 100644 index 00000000..1119acc3 Binary files /dev/null and b/Autonomously scale Compute, Storage/autonomous.png differ diff --git a/Autonomously scale Compute, Storage/index.html b/Autonomously scale Compute, Storage/index.html new file mode 100644 index 00000000..860e9186 --- /dev/null +++ b/Autonomously scale Compute, Storage/index.html @@ -0,0 +1,2636 @@ + + + + + + + + + + + + + + + + + + + + + + + Autonomously scale Compute, Storage - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Autonomously scale Compute, Storage

+

Autonomous features enable automatic scaling in response to changes in workload.

+

Auto-scale of nodes enables scaling based on load:

+
    +
  • In/Out auto-scaling performs horizontal scaling, decreasing (In) or increasing (Out) the node count.
  • +
  • Up/Down auto-scaling performs vertical scaling, increasing (Up) or decreasing (Down) the instance size.
  • +
+

Auto-scale of storage enables expansion of capacity based on usage.

+

Autonomous features can be enabled at time of service launch. Autonomous features can also be enabled or disabled after launch.

+

autonomous.png

+

Enable Auto-Scaling of Nodes

+

Auto-scaling of nodes can be enabled either at time of service launch or after service launch.

+

During service launch:

+
    +
  • Check the "Enable auto-scale nodes" checkbox and set the desired scaling parameters.
  • +
+

After service launch, manage Autonomous settings, and enable the desired auto-scaling features.

+

Enable Auto-Scaling of Storage

+

Auto-scaling of storage can be enabled either at time of service launch or after service launch.

+

During service launch:

+
    +
  • Check the "Enable auto-scale storage" checkbox and set the desired maximum transactional data storage.
  • +
+

After service launch, manage Autonomous settings, and enable the desired auto-scaling features.

+

Manage Autonomous Settings

+

To manage Autonomous settings:

+
    +
  • From the Portal, click the "MANAGE" button for the desired service, then choose "Autonomous" from the menu.
  • +
  • Update settings as desired.
  • +
  • Click "Apply Changes" when complete.
  • +
+

Scaling Rules

+

Automatic scaling occurs based on rules.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PolicyConditionAction
Auto-Scale Disk
  • Disk utilization > 90% sustained for 5 minutes.
  • The disk is expected to run out of capacity in the next 24 hours (predicted based on the last 6 hours of service usage).
Upgrade storage to the next available size in 100GB increments.
You cannot downgrade storage; the upgrade is irreversible.
Auto-Scale Nodes Out
  • CPU utilization > 75% over all replicas sustained for 30 minutes.
  • Number of concurrent sessions > 90% over all replicas sustained for 1 hour.
  • Number of concurrent sessions is expected to hit the maximum within 4 hours (predicted based on the last 2 hours of service usage).
Add new replica or node.
Additional nodes will be of the same size and configuration as existing nodes.
Auto-Scale Nodes In
  • CPU utilization < 50% over all replicas sustained for 1 hour.
  • Number of concurrent sessions < 50% over all replicas sustained for 1 hour.
Remove replica or node.
Node count will not decrease below the initial count set at launch.
Auto-Scale Nodes Up
  • Number of concurrent sessions is expected to hit the maximum within 4 hours (predicted based on the last 2 hours of service usage).
Upgrade all nodes to the next available size.
Auto-Scale Nodes Down
  • CPU utilization < 50% over all replicas sustained for 1 hour.
  • Number of concurrent sessions < 50% over all replicas sustained for 1 hour.
Downgrade nodes.
Node size will not decrease below the initial node size set at launch.
+

Autonomous actions are not instantaneous.

+

Cooldown periods may apply. A cooldown period is the time period after a scaling operation is completed +and before another scaling operation can occur. The cooldown period for storage scaling is 6 hours.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Binarylog Backup Examples/index.html b/Backup and Restore/Binarylog Backup Examples/index.html new file mode 100644 index 00000000..da276868 --- /dev/null +++ b/Backup and Restore/Binarylog Backup Examples/index.html @@ -0,0 +1,2715 @@ + + + + + + + + + + + + + + + + + + + + + + + Binarylog Backup Examples - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Binarylog Backup Examples

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+ +

Binarylog Backup

+

One-time Binarylog

+

To set up a one-time binarylog backup:

+

curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+        --header 'Content-Type: application/json' \
+        --header 'Accept: application/json' \
+        --header "X-API-Key: $API_KEY" \
+        --data "{
+            \"backup_type\": \"full\",
+            \"schedule\": \"once\",
+            \"service_id\": \"$SERVICE_ID\"
+            }"
+
+- API_KEY : SKYSQL API KEY, see SkySQL API Keys
+- SERVICE_ID : SkySQL service identifier, format dbtxxxxxx. You can fetch the service ID from the fully qualified domain name (FQDN) of your service. E.g., in dbpgf17106534.sysp0000.db2.skysql.com, 'dbpgf17106534' is the service ID. You will find the FQDN in the Connect window.

+
Cron Binarylog Backup
+

To set up a cron binarylog backup:

+
       curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+        --header 'Content-Type: application/json' \
+        --header 'Accept: application/json' \
+        --header "X-API-Key: $API_KEY" \
+        --data "{
+        \"backup_type\": \"binarylog\",
+        \"schedule\": \"0 3 * * *\",
+        \"service_id\": \"$SERVICE_ID\"
+        }"
+
+
    +
  • API_KEY : SKYSQL API KEY, see SkySQL API Keys
  • +
  • SCHEDULE : Cron schedule, see Cron
  • +
  • SERVICE_ID : SkySQL serivce identifier, format dbtxxxxxx
  • +
+
Backup status can be fetched using 'https://api.skysql.com/skybackup/v1/backups'. See the 'Backup Status' section for an example.
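To check on this service's backups specifically, the same endpoint accepts a service_id query parameter (this mirrors the 'List all backups by service' example; $SERVICE_ID is assumed to be exported as above):

curl --request GET "https://api.skysql.com/skybackup/v1/backups?service_id=${SERVICE_ID}" \
--header 'Accept: application/json' \
--header "X-API-Key: ${API_KEY}"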
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Bring Your Own Bucket Examples/index.html b/Backup and Restore/Bring Your Own Bucket Examples/index.html new file mode 100644 index 00000000..7ffe53e9 --- /dev/null +++ b/Backup and Restore/Bring Your Own Bucket Examples/index.html @@ -0,0 +1,2668 @@ + + + + + + + + + + + + + + + + + + + + + + + Bring Your Own Bucket Examples - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Bring Your Own Bucket Examples

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+ +

Scheduling Backups to your own bucket (external storage)

+

To set up an external storage backup, you need to make the following API call:

+
    +
  • +

    For GCP you need to create a service account key. Please follow the steps from this documentation. Once you have created the service account key, you will need to base64-encode it. You can encode it directly from the command line. For example, running echo -n 'service-account-key' | base64 will produce something like c2VydmljZS1hY2NvdW50LWtleQ==

    +
    curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
    +--header 'Content-Type: application/json' \
    +--header 'Accept: application/json' \
    +--header "X-API-Key: ${API_KEY}" \
    +--data '{
    +    "backup_type": "full",
    +    "schedule": "0 2 * * *",
    +    "service_id": "dbtgf28044362",
    +    "external_storage": {
    +        "bucket": {
    +            "path": "s3://my_backup_bucket",
    +            "credentials": "c2VydmljZS1hY2NvdW50LWtleQ=="
    +        }
    +    }
    +}'
    +
    +

    The service account key will be in the following format:

    +
    {
    +    "type": "service_account",
    +    "project_id": "XXXXXXX",
    +    "private_key_id": "XXXXXXX",
    +    "private_key": "-----BEGIN PRIVATE KEY-----XXXXX-----END PRIVATE KEY-----",
    +    "client_email": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX.iam.gserviceaccount.com",
    +    "client_id": "XXXXXXX",
    +    "auth_uri": "<https://accounts.google.com/o/oauth2/auth>",
    +    "token_uri": "<https://oauth2.googleapis.com/token>",
    +    "auth_provider_x509_cert_url": "<https://www.googleapis.com/oauth2/v1/certs>",
    +    "client_x509_cert_url": "<https://www.googleapis.com/robot/v1/metadata/x509/XXXXXXXXXXXXXX.iam.gserviceaccount.com>",
    +    "universe_domain": "googleapis.com"
    +}
    +
    +
  • +
  • +

    For AWS, you must provide your own credentials. These include the AWS access key associated with an IAM account and the bucket region. For more information about AWS credentials, please refer to the documentation. The required credentials are aws_access_key_id, aws_secret_access_key, and region. For example, your credentials should look like:

    +
    [default]
    +aws_access_key_id = AKIAIOSFODNN7EXAMPLE
    +aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
    +region = us-west-2
    +
    +

    You should base64-encode your credentials before passing them to the API. You can encode them directly from the command line (a file-based variant is sketched after this list). For example, running echo -e '[default]\naws_access_key_id = AKIAIOSFODNN7EXAMPLE\naws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\nregion = us-west-2' | base64 will produce the following W2RlZmF1bHRdCmF3c19hY2Nlc3Nfa2V5X2lkID0gQUtJQUlPU0ZPRE5ON0VYQU1QTEUKYXdzX3NlY3JldF9hY2Nlc3Nfa2V5ID0gd0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQpyZWdpb24gPSB1cy13ZXN0LTIK. Using the encoded credentials, you can pass them to the API server. To initiate a new backup to your external storage, you need to execute an API call to the backup service:

    +

    curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
    --header 'Content-Type: application/json' \
    --header 'Accept: application/json' \
    --header "X-API-Key: ${API_KEY}" \
    --data '{
        "backup_type": "full",
        "schedule": "0 2 * * *",
        "service_id": "dbtgf28044362",
        "external_storage": {
            "bucket": {
                "path": "s3://my_backup_bucket",
                "credentials": "W2RlZmF1bHRdCmF3c19hY2Nlc3Nfa2V5X2lkID0gQUtJQUlPU0ZPRE5ON0VYQU1QTEUKYXdzX3NlY3JldF9hY2Nlc3Nfa2V5ID0gd0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQpyZWdpb24gPSB1cy13ZXN0LTIK"
            }
        }
    }'

    +
  • +
+ + + + + + + + + + + + + +
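If the credentials already live in files, they can be encoded without pasting them inline. A minimal sketch (the file names are illustrative; -w0 assumes GNU coreutils base64, while macOS base64 wraps nothing by default):

# GCP: encode the downloaded service account key file
base64 -w0 gcs-key.json

# AWS: encode an INI-style credentials file
base64 -w0 aws-credentials.ini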
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Incremental Backup Examples/index.html b/Backup and Restore/Incremental Backup Examples/index.html new file mode 100644 index 00000000..e2a5a20f --- /dev/null +++ b/Backup and Restore/Incremental Backup Examples/index.html @@ -0,0 +1,2716 @@ + + + + + + + + + + + + + + + + + + + + + + + Incremental Backup Examples - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Incremental Backup Examples

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+ +

Incremental Backup

+

Incremental backups can be taken once you have a full backup. Read here for more details.

+

One-time Incremental

+

To set up a one-time incremental backup, you need to make the following API call:

+
curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+--header 'Content-Type: application/json' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}" \
+--data '{
+    "backup_type": "incremental",
+    "schedule": "once",
+    "service_id": "dbtgf28044362"
+}'
+
+
    +
  • API_KEY : SKYSQL API KEY, see SkySQL API Keys
  • +
  • SERVICE_ID : SkySQL service identifier, format dbtxxxxxx. You can fetch the service ID from the fully qualified domain name (FQDN) of your service. E.g., in dbpgf17106534.sysp0000.db2.skysql.com, 'dbpgf17106534' is the service ID. You will find the FQDN in the Connect window.
  • +
+

Cron Incremental

+

To set up a cron incremental backup, you need to make the following API call:

+

curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+--header 'Content-Type: application/json' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}" \
+--data '{
+    "backup_type": "incremental",
+    "schedule": "0 3 * * *",
+    "service_id": "dbtgf28044362"
+}'
+
+- API_KEY : SKYSQL API KEY, see SkySQL API Keys
+- SCHEDULE : Cron schedule, see Cron
+- SERVICE_ID : SkySQL service identifier, format dbtxxxxxx

+
Backup status can be fetched using 'https://api.skysql.com/skybackup/v1/backups'. See the 'Backup Status' section for an example.
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Logical Backup Examples/index.html b/Backup and Restore/Logical Backup Examples/index.html new file mode 100644 index 00000000..89b83f1f --- /dev/null +++ b/Backup and Restore/Logical Backup Examples/index.html @@ -0,0 +1,2685 @@ + + + + + + + + + + + + + + + + + + + + + + + Logical Backup Examples - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Logical Backup Examples

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+ +

One-time Logical(dump) Backup

+
curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+--header 'Content-Type: application/json' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}" \
+--data '{
+    "backup_type": "dump",
+    "schedule": "once",
+    "service_id": "dbtgf28044362"
+}'
+
+
    +
  • API_KEY : SKYSQL API KEY, see SkySQL API Keys
  • +
  • SERVICE_ID : SkySQL service identifier, format dbtxxxxxx. You can fetch the service ID from the fully qualified domain name (FQDN) of your service. E.g., in dbpgf17106534.sysp0000.db2.skysql.com, 'dbpgf17106534' is the service ID. You will find the FQDN in the Connect window.
  • +
+

Cron Logical(dump) Backup

+

To set up a cron Logical(dump) backup, you need to make the following API call:

+
curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+--header 'Content-Type: application/json' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}" \
+--data '{
+    "backup_type": "dump",
+    "schedule": "0 3 * * *",
+    "service_id": "dbtgf28044362"
+}'
+
+
    +
  • API_KEY : SKYSQL API KEY, see SkySQL API Keys
  • +
  • SCHEDULE : Cron schedule, see Cron
  • +
  • SERVICE_ID : SkySQL service identifier, format dbtxxxxxx
  • +
+
Backup status can be fetched using 'https://api.skysql.com/skybackup/v1/backups'. See the 'Backup Status' section for an example.
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/MariaDB Enterprise Backup/index.html b/Backup and Restore/MariaDB Enterprise Backup/index.html new file mode 100644 index 00000000..9579a8cb --- /dev/null +++ b/Backup and Restore/MariaDB Enterprise Backup/index.html @@ -0,0 +1,3415 @@ + + + + + + + + + + + + + + + + + + + + + + + MariaDB Enterprise Backup - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

MariaDB Enterprise Backup

+

Regular and reliable backups are essential to successful recovery of mission critical applications. MariaDB Enterprise Server backup and restore operations are performed using MariaDB Enterprise Backup, an enterprise build of MariaDB Backup.

+

MariaDB Enterprise Backup is compatible with MariaDB Enterprise Server 10.2, 10.3, 10.4, 10.5, and 10.6.

+ +

Storage Engines and Backup Types

+

MariaDB Enterprise Backup creates a file-level backup of data from the MariaDB Enterprise Server data directory. This backup includes temporal data, and the encrypted and unencrypted tablespaces of supported storage engines (e.g., InnoDB, MyRocks, Aria).

+

MariaDB Enterprise Server implements:

+
    +
  • Full backups, which contain all data in the database.
  • +
  • Incremental backups, which contain modifications since the last backup.
  • +
  • Partial backups, which contain a subset of the tables in the database.
  • +
+

Backup support is specific to storage engines. All supported storage engines enable full backup. The InnoDB storage engine additionally supports incremental backup.

+

Non-blocking Backups

+

A feature of MariaDB Enterprise Backup and MariaDB Enterprise Server, non-blocking backups minimize workload impact during backups. When MariaDB Enterprise Backup connects to MariaDB Enterprise Server, staging operations are initiated to protect data during read.

+

Non-blocking backup functionality differs from historical backup functionality in the following ways:

+
    +
  • MariaDB Enterprise Backup in MariaDB Enterprise Server includes enterprise-only optimizations to backup staging, including DDL statement tracking, which reduces lock-time during backups.
  • +
  • MariaDB Backup in MariaDB Community Server 10.4 and later will block writes, log tables, and statistics.
  • +
  • Older MariaDB Community Server releases used FLUSH TABLES WITH READ LOCK, which closed open tables and only allowed tables to be reopened with a read lock for the duration of backups.
  • +
+

Understanding Recovery

+

MariaDB Enterprise Backup creates complete or incremental backups of MariaDB Enterprise Server data, and is also used to restore data from backups produced using MariaDB Enterprise Backup.

+

Preparing Backups for Recovery

+

Full backups produced using MariaDB Enterprise Backup are not initially point-in-time consistent, and an attempt to restore from a raw full backup will cause InnoDB to crash to protect the data.

+

Incremental backups produced using MariaDB Enterprise Backup contain only the changes since the last backup and cannot be used standalone to perform a restore.

+

To restore from a backup, you first need to prepare the backup for point-in-time consistency using the --prepare command:

+
    +
  • Running the --prepare command on a full backup synchronizes the tablespaces, ensuring that they are point-in-time consistent and ready for use in recovery.
  • +
  • Running the --prepare command on an incremental backup synchronizes the tablespaces and also applies the updated data into the previous full backup, making it a complete backup ready for use in recovery.
  • +
  • Running the --prepare command on data that is to be used for a partial restore (when restoring only one or more selected tables) requires that you also use the --export option to create the necessary .cfg files to use in recovery.
  • +
+

Restore Requires Empty Data Directory

+

When MariaDB Enterprise Backup restores from a backup, it copies or moves the backup files into the MariaDB Enterprise Server data directory, as defined by the datadir system variable.

+

For MariaDB Enterprise Backup to safely restore data from full and incremental backups, the data directory must be empty. One way to achieve this is to move the data directory aside to a unique directory name:

+
    +
  1. Make sure that the Server is stopped.
  2. +
  3. Move the data directory to a unique name (for example, /var/lib/mysql-2020-01-01) OR remove the old data directory (depending on how much space you have available).
  4. +
  5. Create a new (empty) data directory (for example, mkdir /var/lib/mysql).
  6. +
  7. Run MariaDB Enterprise Backup to restore the databases into that directory.
  8. +
  9. Change the ownership of all the restored files to the correct system user (for example, chown -R mysql:mysql /var/lib/mysql).
  10. +
  11. Start MariaDB Enterprise Server, which now uses the restored data directory.
  12. +
  13. When ready, and if you have not already done so, delete the old data directory to free disk space.
  14. +
+
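Taken together, the steps above might look like the following shell session (a sketch only; the paths and the mysql:mysql owner follow the examples used elsewhere on this page):

$ sudo systemctl stop mariadb
$ sudo mv /var/lib/mysql /var/lib/mysql-2020-01-01    # set the old data directory aside
$ sudo mkdir /var/lib/mysql                           # new, empty data directory
$ sudo mariabackup --copy-back --target-dir=/data/backups/full
$ sudo chown -R mysql:mysql /var/lib/mysql
$ sudo systemctl start mariadb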

Creating the Backup User

+

When MariaDB Enterprise Backup performs a backup operation, it not only copies files from the data directory but also connects to the running MariaDB Enterprise Server.

+

This connection to MariaDB Enterprise Server is used to manage locks and backup staging that prevent the Server from writing to a file while being read for a backup.

+

MariaDB Enterprise Backup establishes this connection based on the user credentials specified with theĀ --userĀ andĀ --passwordĀ options when performing a backup.

+

It is recommended that a dedicated user be created and authorized to perform backups.

+

10.5 and Later

+

MariaDB Enterprise Backup 10.5 and later requires this user to have the RELOAD, PROCESS, LOCK TABLES, and BINLOG MONITOR privileges. (The BINLOG MONITOR privilege replaced the REPLICATION CLIENT privilege in MariaDB Enterprise Server 10.5.):

+

CREATE USER 'mariabackup'@'localhost' IDENTIFIED BY 'mbu_passwd';
GRANT RELOAD, PROCESS, LOCK TABLES, BINLOG MONITOR ON *.* TO 'mariabackup'@'localhost';

+

In the above example, MariaDB Enterprise Backup would run on the local system that runs MariaDB Enterprise Server. Where backups may be run against a remote server, the user authentication and authorization should be adjusted.

+

While MariaDB Enterprise Backup requires a user for backup operations, no user is required for restore operations since restores occur while MariaDB Enterprise Server is not running.

+

10.4 and Earlier

+

MariaDB Enterprise Backup 10.4 and earlier requires this user to have the RELOAD, PROCESS, LOCK TABLES, and REPLICATION CLIENT privileges. (The BINLOG MONITOR privilege replaced the REPLICATION CLIENT privilege in MariaDB Enterprise Server 10.5.):

+

CREATE USER 'mariabackup'@'localhost' IDENTIFIED BY 'mbu_passwd';
GRANT RELOAD, PROCESS, LOCK TABLES, REPLICATION CLIENT ON *.* TO 'mariabackup'@'localhost';

+

In the above example, MariaDB Enterprise Backup would run on the local system that runs MariaDB Enterprise Server. Where backups may be run against a remote server, the user authentication and authorization should be adjusted.

+

While MariaDB Enterprise Backup requires a user for backup operations, no user is required for restore operations since restores occur while MariaDB Enterprise Server is not running.

+

Full Backup and Restore

+

Full backups performed with MariaDB Enterprise Backup contain all table data present in the database.

+

When performing a full backup, MariaDB Enterprise Backup makes a file-level copy of the MariaDB Enterprise Server data directory. This backup omits log data such as the binary logs (binlog), error logs, general query logs, and slow query logs.

+

Performing Full Backups

+

When you perform a full backup, MariaDB Enterprise Backup writes the backup to the --target-dir path. The directory must be empty or non-existent and the operating system user account must have permission to write to that directory. A database user account is required to perform the backup.

+

The version of mariabackup or mariadb-backup should match the MariaDB Enterprise Server version. When the versions do not match, errors can sometimes occur, or the backup can sometimes be unusable.

+
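One quick way to confirm the versions line up before taking a backup is to compare the tool and server versions directly (a sketch; the exact output format varies by release, and on older releases the client binary is named mysql rather than mariadb):

$ mariabackup --version
$ mariadb -u mariabackup -p -e "SELECT VERSION();"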

To create a backup, execute mariabackup or mariadb-backup with the --backup option, and provide the database user account credentials using the --user and --password options:

+

$ sudo mariabackup --backup \
   --target-dir=/data/backups/full \
   --user=mariabackup \
   --password=mbu_passwd

+

Subsequent to the above example, the backup is now available in the designated --target-dir path.

+

Preparing a Full Backup for Recovery

+

A raw full backup is not point-in-time consistent and must be prepared before it can be used for a restore. The backup can be prepared any time after the backup is created and before the backup is restored. However, MariaDB recommends preparing a backup immediately after taking the backup to ensure that the backup is consistent.

+

The backup should be prepared with the same version of MariaDB Enterprise Backup that was used to create the backup.

+

To prepare the backup, execute mariabackup or mariadb-backup with the --prepare option:

+

$ sudo mariabackup --prepare \
   --use-memory=34359738368 \
   --target-dir=/data/backups/full

+

For best performance, the --use-memory option should be set to the server's innodb_buffer_pool_size value.

+

Restoring from Full Backups

+

Once a full backup has been prepared to be point-in-time consistent, MariaDB Enterprise Backup is used to copy backup data to the MariaDB Enterprise Server data directory.

+

To restore from a full backup:

+
    +
  1. Stop the MariaDB Enterprise Server
  2. +
  3. Empty the data directory
  4. +
  5. +

    Restore from the "full" directory using the --copy-back option:

    +

    $ sudo mariabackup --copy-back --target-dir=/data/backups/full

    +
  6. +
+

MariaDB Enterprise Backup writes to the data directory as the current user, which can be changed using sudo. To confirm that restored files are properly owned by the user that runs MariaDB Enterprise Server, run a command like this (adapted for the correct user/group):

+

$ sudo chown -R mysql:mysql /var/lib/mysql

+

Once this is done, start MariaDB Enterprise Server:

+

$ sudo systemctl start mariadb

+

When the Server starts, it works from the restored data directory.

+

Incremental Backup and Restore

+

Full backups of large data-sets can be time-consuming and resource-intensive. MariaDB Enterprise Backup supports the use of incremental backups to minimize this impact.

+

While full backups are resource-intensive at time of backup, the resource burden around incremental backups occurs when preparing for restore. First, the full backup is prepared for restore, then each incremental backup is applied.

+

Performing Incremental Backups

+

When you perform an incremental backup, MariaDB Enterprise Backup compares a previous full or incremental backup to what it finds on MariaDB Enterprise Server. It then creates a new backup containing the incremental changes.

+

Incremental backup is supported for InnoDB tables. Tables using other storage engines receive full backups even during incremental backup operations.

+

To increment a full backup, use the --incremental-basedir option to indicate the path to the full backup and the --target-dir option to indicate where you want to write the incremental backup:

+

$ sudo mariabackup --backup \
   --incremental-basedir=/data/backups/full \
   --target-dir=/data/backups/inc1 \
   --user=mariabackup \
   --password=mbu_passwd

+

In this example, MariaDB Enterprise Backup reads the /data/backups/full directory, then creates an incremental backup in the /data/backups/inc1 directory.

+

Preparing an Incremental Backup

+

An incremental backup must be applied to a prepared full backup before it can be used in a restore operation. If you have multiple full backups to choose from, pick the nearest full backup prior to the incremental backup that you want to restore. You may also want to back up your full-backup directory, as it will be modified by the updates in the incremental data.

+

If your full backup directory is not yet prepared, run this to make it consistent:

+

$ sudo mariabackup --prepare --target-dir=/data/backups/full

+

Then, using the prepared full backup, apply the first incremental backup's data to the full backup in an incremental preparation step:

+

$ sudo mariabackup --prepare \
   --target-dir=/data/backups/full \
   --incremental-dir=/data/backups/inc1

+

Once the incremental backup has been applied to the full backup, the full backup directory contains the changes from the incremental backup (that is, the inc1/ directory). Feel free to remove inc1/ to save disk space.

+
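If more than one incremental backup exists, each is applied in order to the same full backup directory before restoring. A sketch (inc2 here stands for a further, hypothetical incremental taken with --incremental-basedir=/data/backups/inc1):

$ sudo mariabackup --prepare --target-dir=/data/backups/full
$ sudo mariabackup --prepare --target-dir=/data/backups/full --incremental-dir=/data/backups/inc1
$ sudo mariabackup --prepare --target-dir=/data/backups/full --incremental-dir=/data/backups/inc2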

Restoring from Incremental Backups

+

Once you have prepared the full backup directory with all the incremental changes you need (as described above), stop the MariaDB Enterprise Server, empty its data directory, and restore from the original full backup directory using the --copy-back option:

+

$ sudo mariabackup --copy-back --target-dir=/data/backups/full

+

MariaDB Enterprise Backup writes files into the data directory using either the current user or root (in the case of a sudo operation), which may be different from the system user that runs the database. Run the following to recursively update the ownership of the restored files and directories:

+

$ sudo chown -R mysql:mysql /var/lib/mysql

+

Then, start MariaDB Enterprise Server. When the Server starts, it works from the restored data directory.

+

Partial Backup and Restore

+

In a partial backup, MariaDB Enterprise Backup copies a specified subset of tablespaces from the MariaDB Enterprise Server data directory. Partial backups are useful in establishing a higher frequency of backups on specific data, at the expense of increased recovery complexity. In selecting tablespaces for a partial backup, please consider referential integrity.

+

Performing a Partial Backup

+

Command-line options can be used to narrow the set of databases or tables to be included within a backup:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
OptionDescription
--databasesList of databases to include
--databases-excludeList of databases to omit from the backup
--databases-filePath to file listing the databases to include
--tablesList of tables to include
--tables-excludeList of tables to exclude
--tables-filePath to file listing the tables to include
+

For example, you may wish to produce a partial backup, which excludes a specific database:

+

$ sudo mariabackup --backup \
   --target-dir=/data/backups/part \
   --user=mariabackup \
   --password=mbu_passwd \
   --databases-exclude=test

+

Partial backups can also be incremental:

+

$ sudo mariabackup --backup \
   --incremental-basedir=/data/backups/part \
   --target-dir=/data/backups/part_inc1 \
   --user=mariabackup \
   --password=mbu_passwd \
   --databases-exclude=test

+

Preparing a Backup Before a Partial Restore

+

As with full and incremental backups, partial backups are not point-in-time consistent. A partial backup must be prepared before it can be used for recovery.

+

A partial restore can be performed from a full backup or partial backup.

+

The preparation step for either partial or full backup restoration requires the use of transportable tablespaces for InnoDB. As such, each prepare operation requires the --export option:

+

$ sudo mariabackup --prepare --export --target-dir=/data/backups/part

+

When using a partial incremental backup for restore, the incremental data must be applied to its prior partial backup data before its data is complete. If performing partial incremental backups, run the prepare statement again to apply the incremental changes onto the partial backup that served as the base.

+

$ sudo mariabackup --prepare --export \
   --target-dir=/data/backups/part \
   --incremental-dir=/data/backups/part_inc1

+

Performing a Partial Restore

+

Unlike full and incremental backups, you cannot restore partial backups directly using MariaDB Enterprise Backup. Further, as a partial backup does not contain a complete data directory, you cannot restore MariaDB Enterprise Server to a startable state solely with a partial backup.

+

To restore from a partial backup, you need to prepare a table on the MariaDB Enterprise Server, then manually copy the files into the data directory.

+

The details of the restore procedure depend on the characteristics of the table:

+ +

As partial restores are performed while the server is running, not stopped, care should be taken to prevent production workloads during restore activity.

+
+

Note

+

You can also use data from a full backup in a partial restore operation if you have prepared the data using the --export option as described above.

+
+

Partial Restore Non-partitioned Tables

+

To restore a non-partitioned table from a backup, first create a new table on MariaDB Enterprise Server to receive the restored data. It should match the specifications of the table you're restoring.

+

Be extra careful if the backup data is from a server with a different version than the restore server, as some differences (such as a differing ROW_FORMAT) can cause an unexpected result.

+
    +
  1. +

    Create an empty table for the data being restored:

    +

    CREATE TABLE test.address_book (
        id INT PRIMARY KEY AUTO_INCREMENT,
        name VARCHAR(255),
        email VARCHAR(255)
    );

    +
  2. +
  3. +

    Modify the table to discard the tablespace:

    +

    ALTER TABLE test.address_book DISCARD TABLESPACE;

    +
  4. +
  5. +

    You can copy (or move) the files for the table from the backup to the data directory:

    +

    $ sudo cp /data/backups/part_inc1/test/address_book.* /var/lib/mysql/test

    +
  6. +
  7. +

    Use a wildcard to include both the .ibd and .cfg files. Then, change the owner to the system user running MariaDB Enterprise Server:

    +

    $ sudo chown mysql:mysql /var/lib/mysql/test/address_book.*

    +
  8. +
  9. +

    Lastly, import the new tablespace:

    +

    ALTER TABLE test.address_book IMPORT TABLESPACE;

    +

    MariaDB Enterprise Server looks in the data directory for the tablespace you copied in, then imports it for use. If the table is encrypted, it also looks for the encryption key with the relevant key ID that the table data specifies.

    +
  10. +
  11. +

    Repeat this step for every table you wish to restore.

    +
  12. +
+

Partial Restore Partitioned Tables

+

Restoring a partitioned table from a backup requires a few extra steps compared to restoring a non-partitioned table.

+

To restore a partitioned table from a backup, first create a new table on MariaDB Enterprise Server to receive the restored data. It should match the specifications of the table you're restoring, including the partition specification.

+

Be extra careful if the backup data is from a server with a different version than the restore server, as some differences (such as a differing ROW_FORMAT) can cause an unexpected result.

+
    +
  1. +

    Create an empty table for the data being restored:

    +

    CREATE TABLE test.students (
        id INT NOT NULL AUTO_INCREMENT,
        name VARCHAR(255),
        email VARCHAR(255),
        graduating_year YEAR,
        PRIMARY KEY (id, graduating_year)
    ) ENGINE = InnoDB
    PARTITION BY RANGE (graduating_year) (
        PARTITION p0 VALUES LESS THAN (2019),
        PARTITION p1 VALUES LESS THAN MAXVALUE
    );

    +
  2. +
  3. +

    Then create a second empty table matching the column specification, but without partitions. This will be your working table:

    +

    CREATE TABLE test.students_work AS SELECT * FROM test.students WHERE NULL;

    +
  4. +
  5. +

    For each partition you want to restore, discard the working table's tablespace:

    +

    ALTER TABLE test.students_work DISCARD TABLESPACE;

    +
  6. +
  7. +

    Then, copy the table files from the backup, using the new name:

    +

    $ sudo cp /data/backups/part_inc1/test/students.ibd /var/lib/mysql/test/students_work.ibd
    $ sudo cp /data/backups/part_inc1/test/students.cfg /var/lib/mysql/test/students_work.cfg

    +
  8. +
  9. +

    Change the owner to that of the user running MariaDB Enterprise Server:

    +

    $ sudo chown mysql:mysql /var/lib/mysql/test/students_work.*

    +
  10. +
  11. +

    Import the copied tablespace:

    +

    ALTER TABLE test.students_work IMPORT TABLESPACE;

    +
  12. +
  13. +

    Lastly, exchange the partition, copying the tablespace from the working table into the partition file for the target table:

    +

    ALTER TABLE test.students EXCHANGE PARTITION p0 WITH TABLE test.students_work;

    +
  14. +
  15. +

    Repeat the above process for each partition until you have them all exchanged into the target table. Then delete the working table, as it's no longer necessary:

    +

    DROP TABLE test.students_work;

    +

    This restores a partitioned table.

    +
  16. +
+

Partial Restore of Tables with Full-Text Indexes

+

When restoring a table with a full-text search (FTS) index, InnoDB may throw a schema mismatch error.

+

In this case, to restore the table, it is recommended to:

+
    +
  • Remove the corresponding .cfg file.
  • +
  • Restore data to a table without any secondary indexes including FTS.
  • +
  • Add the necessary secondary indexes to the restored table.
  • +
+

For example, to restore table t1 with FTS index from database db1:

+
    +
  1. +

    In the MariaDB shell, drop the table you are going to restore:

    +

    DROP TABLE IF EXISTS db1.t1;

    +
  2. +
  3. +

    Create an empty table for the data being restored:

    +

    CREATE TABLE db1.t1 (f1 CHAR(10)) ENGINE=InnoDB;

    +
  4. +
  5. +

    Modify the table to discard the tablespace:

    +

    ALTER TABLE db1.t1 DISCARD TABLESPACE;

    +
  6. +
  7. +

    In the operating system shell, copy the table files from the backup to the data directory of the corresponding database:

    +

    $ sudo cp /data/backups/part/db1/t1.* /var/lib/mysql/db1

    +
  8. +
  9. +

    Remove the .cfg file from the data directory:

    +

    $ sudo rm /var/lib/mysql/db1/t1.cfg

    +
  10. +
  11. +

    Change the owner of the newly copied files to the system user running MariaDB Enterprise Server:

    +

    $ sudo chown mysql:mysql /var/lib/mysql/db1/t1.*

    +
  12. +
  13. +

    In the MariaDB shell, import the copied tablespace:

    +

    ALTER TABLE db1.t1 IMPORT TABLESPACE;

    +
  14. +
  15. +

    Verify that the data has been successfully restored:

    +

    SELECT * FROM db1.t1;

    +

    +--------+
    | f1     |
    +--------+
    | ABC123 |
    +--------+

    +
  16. +
  17. +

    Add the necessary secondary indexes:

    +

    ALTER TABLE db1.t1 FORCE, ADD FULLTEXT INDEX f_idx(f1);

    +
  18. +
  19. +

    The table is now fully restored:

    +

    SHOW CREATE TABLE db1.t1\G

    +
      +
    • +
      *************************** 1. row ***************************
      +       Table: t1
      +Create Table: CREATE TABLE `t1` (
      +  `f1` char(10) DEFAULT NULL,
      +  FULLTEXT KEY `f_idx` (`f1`)
      +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci
      +
      +
    • +
    +
  20. +
+

Point-in-Time Recoveries

+

Recovering from a backup restores the data directory at a specific point-in-time, but it does not restore the binary log. In a point-in-time recovery, you begin by restoring the data directory from a full or incremental backup, then use the mysqlbinlog utility to recover the binary log data to a specific point in time.

+
    +
  1. +

    First, prepare the backup as you normally would for a full or incremental backup:

    +

    $ sudo mariabackup --prepare --target-dir=/data/backups/full

    +
  2. +
  3. +

    When MariaDB Enterprise Backup runs on a MariaDB Enterprise Server where binary logging is enabled, it stores binary log information in the xtrabackup_binlog_info file. Consult this file to find the binary log file name and position to use. In the following example, the log position is 321.

    +

    $ sudo cat /data/backups/full/xtrabackup_binlog_info

    +

    mariadb-node4.00001 321

    +
  4. +
  5. +

    Update the configuration file to use a new data directory.

    +

    [mysqld]
    datadir=/var/lib/mysql_new

    +
  6. +
  7. +

    Using MariaDB Enterprise Backup, restore from the backup to the new data directory:

    +

    $ sudo mariabackup --copy-back --target-dir=/data/backups/full

    +
  8. +
  9. +

    Then change the owner to the MariaDB Enterprise Server system user:

    +

    $ sudo chown -R mysql:mysql /var/lib/mysql_new

    +
  10. +
  11. +

    Start MariaDB Enterprise Server:

    +

    $ sudo systemctl start mariadb

    +
  12. +
  13. +

    Use the mysqlbinlog utility, together with the binary log file in the old data directory, the start position from the xtrabackup_binlog_info file, and the date and time you want to restore to, to create an SQL file with the binary log changes:

    +

    $ mysqlbinlog --start-position=321 \
       --stop-datetime="2019-06-28 12:00:00" \
       /var/lib/mysql/mariadb-node4.00001 \
       > mariadb-binlog.sql

    +
  14. +
  15. +

    Lastly, run the binary log SQL to restore the databases:

    +

    $ mysql -u root -p < mariadb-binlog.sql

    +
  16. +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Other backup API examples/index.html b/Backup and Restore/Other backup API examples/index.html new file mode 100644 index 00000000..232754e9 --- /dev/null +++ b/Backup and Restore/Other backup API examples/index.html @@ -0,0 +1,2871 @@ + + + + + + + + + + + + + + + + + + + + + + + Other backup API examples - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Other backup API examples

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+ +

Working with Backup Schedules

+

Get backup schedules inside the organization:

+
curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}"
+
+ +

Get all Backup Schedules per service

+

To get backup schedules for a specific service:

+

curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules?service_id=dbtgf28044362' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}"
+
+- API_KEY : SKYSQL API KEY, see SkySQL API Keys

+

Get Backup Schedule by ID

+

To get a specific backup schedule by ID:

+
curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules/200' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}"
+
+ +

Update Backup Schedule

+

In the following example, we update the backup schedule to 9 AM UTC. Remember, you cannot change the schedules for one-time backups. To update a specific backup schedule, you need to make the following API call:

+

curl --location --request PATCH 'https://api.skysql.com/skybackup/v1/backups/schedules/215' \
+--header 'Content-Type: application/json' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}" \
+--data '{
+  "schedule": "0 9 ** *"
+}'
+
+- API_KEY : SKYSQL API KEY, see SkySQL API Keys +- SCHEDULE : Cron schedule, see Cron

+

Delete Backup Schedule

+

To delete a backup schedule, you need to provide the backup schedule ID. Example API call below:

+
curl --location --request DELETE 'https://api.skysql.com/skybackup/v1/backups/schedules/215' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}"
+
+ +

Backup Status

+

The following API calls illustrate how to get the available backups and the status of backup jobs.

+

List all backups inside the organization

+

Here is an example to fetch all the available backups in your organization:

+

curl --location 'https://api.skysql.com/skybackup/v1/backups' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}"
+
+- API_KEY : SKYSQL API KEY, see SkySQL API Keys

+

List all backups by service

+

To list all backups available for your service:

+

curl --location 'https://api.skysql.com/skybackup/v1/backups?service_id=dbtgf28216706' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}"
+
+- API_KEY : SKYSQL API KEY, see SkySQL API Keys

+

The typical response of either of the two calls should look like:

+
{
+    "backups": [
+        {
+            "id": "eda3b72460c8c0d9d61a7f01b6a22e32:dbtgf28216706:tx-filip-mdb-ms-0",
+            "service_id": "dbtgf28216706",
+            "type": "full",
+            "method": "skybucket",
+            "server_pod": "tx-filip-mdb-ms-0",
+            "backup_size": 5327326,
+            "reference_full_backup": "",
+            "point_in_time": "2024-03-26 17:18:21",
+            "start_time": "2024-03-26T17:18:57Z",
+            "end_time": "2024-03-26T17:19:01Z",
+            "status": "Succeeded"
+        }
+    ],
+    "backups_count": 1,
+    "pages_count": 1
+}
+
+
+

The backup id is the most important part of this data, as you need to provide it in the restore API call to schedule restore execution.

+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Physical Backup Examples/index.html b/Backup and Restore/Physical Backup Examples/index.html new file mode 100644 index 00000000..652d9add --- /dev/null +++ b/Backup and Restore/Physical Backup Examples/index.html @@ -0,0 +1,2715 @@ + + + + + + + + + + + + + + + + + + + + + + + Physical Backup Examples - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Physical Backup Examples

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+ +

Full(physical) Backup Scheduling

+

One-time Full(physical) Backup Example

+
    curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+    --header 'Content-Type: application/json' \
+    --header 'Accept: application/json' \
+    --header "X-API-Key: $API_KEY" \
+    --data "{
+        \"backup_type\": \"full\",
+        \"schedule\": \"once\",
+        \"service_id\": \"$SERVICE_ID\"
+        }"
+
+
    +
  • API_KEY : SKYSQL API KEY, see SkySQL API Keys
  • +
  • SERVICE_ID : SkySQL service identifier, format dbtxxxxxx. You can fetch the service ID from the fully qualified domain name (FQDN) of your service. E.g., in dbpgf17106534.sysp0000.db2.skysql.com, 'dbpgf17106534' is the service ID. You will find the FQDN in the Connect window.
  • +
+

Cron Full(physical) Example

+
    curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+    --header 'Content-Type: application/json' \
+    --header 'Accept: application/json' \
+    --header "X-API-Key: $API_KEY" \
+    --data "{
+    \"backup_type\": \"full\",
+    \"schedule\": \"0 3 * * *\",
+    \"service_id\": \"$SERVICE_ID\"
+    }"
+
+
    +
  • API_KEY : SKYSQL API KEY, see SkySQL API Keys
  • +
  • SCHEDULE : Cron schedule, see Cron
  • +
  • SERVICE_ID : SkySQL service identifier, format dbtxxxxxx
  • +
+
Backup status can be fetched using 'https://api.skysql.com/skybackup/v1/backups'. See the 'Backup Status' section for an example.
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Restore Delete Examples/index.html b/Backup and Restore/Restore Delete Examples/index.html new file mode 100644 index 00000000..4fed2904 --- /dev/null +++ b/Backup and Restore/Restore Delete Examples/index.html @@ -0,0 +1,2561 @@ + + + + + + + + + + + + + + + + + + + + + + + Restore Delete Examples - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Restore Delete Examples

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+

In order to delete an already scheduled restore, users need to make the following API call:

+
curl --location --request DELETE 'https://api.skysql.com/skybackup/v1/restores/<ID>' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}"
+
+
    +
  • ID : the SkySQL Restore ID. To get the restore id, you can use the following API call:
  • +
+
curl --location 'https://api.skysql.com/skybackup/v1/backups?service_id=<SERVICE_ID>' \
+  --header 'Accept: application/json' \
+  --header "X-API-Key: skysql.1zzz.mh2oe85a.5aXjdyqgef7facjgAQ6DcLlVfx8imkkybIan.87c113e7"
+
+
    +
  • SERVICE_ID : SkySQL service identifier, format dbtxxxxxx. You can fetch your service ID from the fully qualified domain name (FQDN) of your service. E.g., in dbpgf17106534.sysp0000.db2.skysql.com, 'dbpgf17106534' is the service ID. You will find the FQDN in the Connect window.
  • +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Restore Examples/index.html b/Backup and Restore/Restore Examples/index.html new file mode 100644 index 00000000..dab489d6 --- /dev/null +++ b/Backup and Restore/Restore Examples/index.html @@ -0,0 +1,2585 @@ + + + + + + + + + + + + + + + + + + + Restore Examples - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Restore Examples

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+ +

Restore From your Bucket (External Storage)

+

You can restore your data from external storage. Your external storage bucket data should be created via one of the following tools: mariabackup or mysqldump. Credentials for external storage access can be provided as follows:

+
    +
  • +

    For GCP you need to create a service account key. Please follow the steps from this documentation. Once you have created the service account key, you will need to base64-encode it. You can encode it directly from the command line. For example, running echo -n 'service-account-key' | base64 will produce the following: c2VydmljZS1hY2NvdW50LWtleQ==

    +
    curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
    +--header 'Content-Type: application/json' \
    +--header 'Accept: application/json' \
    +--header "X-API-Key: ${API_KEY}" \
    +--data '{
    +    "backup_type": "full",
    +    "schedule": "0 2 * * *",
    +    "service_id": "dbtgf28044362",
    +    "external_storage": {
    +        "bucket": {
    +            "path": "s3://my_backup_bucket",
    +            "credentials": "c2VydmljZS1hY2NvdW50LWtleQ=="
    +        }
    +    }
    +}'
    +
    +

    The service account key will be in the following format:

    +
    {
    +    "type": "service_account",
    +    "project_id": "XXXXXXX",
    +    "private_key_id": "XXXXXXX",
    +    "private_key": "-----BEGIN PRIVATE KEY-----XXXXX-----END PRIVATE KEY-----",
    +    "client_email": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX.iam.gserviceaccount.com",
    +    "client_id": "XXXXXXX",
    +    "auth_uri": "<https://accounts.google.com/o/oauth2/auth>",
    +    "token_uri": "<https://oauth2.googleapis.com/token>",
    +    "auth_provider_x509_cert_url": "<https://www.googleapis.com/oauth2/v1/certs>",
    +    "client_x509_cert_url": "<https://www.googleapis.com/robot/v1/metadata/x509/XXXXXXXXXXXXXX.iam.gserviceaccount.com>",
    +    "universe_domain": "googleapis.com"
    +}
    +
    +
  • +
  • +

    For AWS, you must provide your own credentials. These include the AWS access key associated with an IAM account and the bucket region. For more information about AWS credentials, please refer to the AWS documentation. The required credentials are aws_access_key_id, aws_secret_access_key, and region. For example, your credentials should look like:

    +
    [default]
    +aws_access_key_id = AKIAIOSFODNN7EXAMPLE
    +aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
    +region = us-west-2
    +
    +

    You should base64-encode your credentials before passing them to the API. You can encode them directly on the command line. For example, running echo -e '[default]\naws_access_key_id = AKIAIOSFODNN7EXAMPLE\naws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\nregion = us-west-2' | base64 produces W2RlZmF1bHRdCmF3c19hY2Nlc3Nfa2V5X2lkID0gQUtJQUlPU0ZPRE5ON0VYQU1QTEUKYXdzX3NlY3JldF9hY2Nlc3Nfa2V5ID0gd0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQpyZWdpb24gPSB1cy13ZXN0LTIK.

    +
  • +
+

The following request body demonstrates how to restore your data from external storage; it is sent to the same /skybackup/v1/restores endpoint used in the other restore examples:

+
{
+  "service_id": "dbtgf28044362",
+  "key": "/backup.tar.gz",
+  "external_source": {
+    "bucket": "gs://my_backup_bucket",
+    "method": "mariabackup",
+    "credentials": "W2RlZmF1bHRdCmF3c19hY2Nlc3Nfa2V5X2lkID0gQUtJQUlPU0ZPRE5ON0VYQU1QTEUKYXdzX3NlY3JldF9hY2Nlc3Nfa2V5ID0gd0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQpyZWdpb24gPSB1cy13ZXN0LTIK"
+  }
+}
+
+

If your backup data is encrypted, you need to pass the encryption key as well:

+
{
+  "service_id": "dbtgf28044362",
+  "key": "/backup.tar.gz",
+  "external_source": {
+    "bucket": "gs://my_backup_bucket",
+    "method": "mariabackup",
+    "credentials": "W2RlZmF1bHRdCmF3c19hY2Nlc3Nfa2V5X2lkID0gQUtJQUlPU0ZPRE5ON0VYQU1QTEUKYXdzX3NlY3JldF9hY2Nlc3Nfa2V5ID0gd0phbHJYVXRuRkVNSS9LN01ERU5HL2JQeFJmaUNZRVhBTVBMRUtFWQpyZWdpb24gPSB1cy13ZXN0LTIK",
+    "encryption_key": "my_encryption_key"
+  }
+}
+
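Either of these bodies is submitted to the restores endpoint in the same way as the other restore examples in this documentation. One convenient way, sketched below, is to save the body to a file (restore.json is a name used here only for illustration) and reference it from curl:

curl --location 'https://api.skysql.com/skybackup/v1/restores' \
  --header 'Content-Type: application/json' \
  --header 'Accept: application/json' \
  --header "X-API-Key: ${API_KEY}" \
  --data @restore.json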
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Restore From Your Own Bucket/index.html b/Backup and Restore/Restore From Your Own Bucket/index.html new file mode 100644 index 00000000..13663fe7 --- /dev/null +++ b/Backup and Restore/Restore From Your Own Bucket/index.html @@ -0,0 +1,2676 @@ + + + + + + + + + + + + + + + + + + + + + + + Restore From Your Own Bucket - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Restore From Your Own Bucket

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+ +

Restore From your Bucket (External Storage)

+

You can restore your data from external cloud storage. +SkySQL supports restoration from both Google Cloud Storage (GCS) and Amazon S3 cloud storage buckets. +Your backup data should be created using either mariabackup or mysqldump.

+

Below is a sample restore call:

+
curl --location 'https://api.skysql.com/skybackup/v1/restores' \
+--header 'Content-Type: application/json' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}" \
+--data '{
+    "service_id": "<SERVICE_ID>",
+    "id": "<ID>",
+    "external_source": {
+      "bucket": "<GCS_URI> or <S3_URI>",
+      "method": "<BACKUP_METHOD>",
+      "credentials": "<GCP_SERVICE_ACCOUNT_BASE64> or <AWS_ACCOUNT_ACCESS_KEY_BASE64>"
+    }
+  }'
+
+
    +
  • SERVICE_ID : SkySQL service identifier, format dbtxxxxxx. You can fetch your service ID from the fully qualified domain name (FQDN) of your service.
    + E.g.: in dbpgf17106534.sysp0000.db2.skysql.com, 'dbpgf17106534' is the service ID. You will find the FQDN in the Connect window.
  • +
  • ID : the backup data file reference, available in your GCS or S3 bucket.
  • +
+
+

Note

+

A gzip-compressed backup file is expected.

+

Example: +

gzip <backup file> -c > <backup file>.gz
+

+
+
    +
  • GCS_URI/S3_URI : the GCS/S3 bucket URI where the backup file is stored.
  • +
+

Format gs://BUCKET_NAME/ or s3://BUCKET_NAME/

+
+

Note

+

Make sure the bucket URI ends with a trailing slash.

+
+
    +
  • BACKUP_METHOD : the backup method used to create the backup file. +
    Available options: mariabackup , mysqldump
  • +
  • GCP_SERVICE_ACCOUNT_BASE64/AWS_ACCOUNT_ACCESS_KEY_BASE64 : Your base64 encoded GCP service account or AWS account access key.
  • +
+

Information on how to create a GCP service account is available here. The Storage Admin role is required for the service account attempting the restore.

+

Sample GCP service account key and command to encode it:

+
echo -n '
+{
+    "type": "service_account",
+    "project_id": "XXXXXXX",
+    "private_key_id": "XXXXXXX",
+    "private_key": "-----BEGIN PRIVATE KEY-----XXXXX-----END PRIVATE KEY-----",
+    "client_email": "XXXXXXXXXXXXXXXXXXXXXXXXXXXX.iam.gserviceaccount.com",
+    "client_id": "XXXXXXX",
+    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+    "token_uri": "https://oauth2.googleapis.com/token",
+    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+    "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/XXXXXXXXXXXXXX.iam.gserviceaccount.com",
+    "universe_domain": "googleapis.com"
+} ' | base64
+
+

Sample AWS account access key and command to encode it:

+
echo -n '[default]
+aws_access_key_id = XXXXXXXXXXXXXEXAMPLE
+aws_secret_access_key = XXXXXXXXXXXXX/XXXXXXXXXXXXX/XXXXXXXXXXXXXEXAMPLEKEY
+region = XXXXXXXXXXXXX' | base64
+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Restore Listing Examples/index.html b/Backup and Restore/Restore Listing Examples/index.html new file mode 100644 index 00000000..acb19725 --- /dev/null +++ b/Backup and Restore/Restore Listing Examples/index.html @@ -0,0 +1,2588 @@ + + + + + + + + + + + + + + + + + + + + + + + Restore Listing Examples - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Restore Listing Examples

+ + +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
To list all restores that were scheduled in the past, make the following API call:
curl --location 'https://api.skysql.com/skybackup/v1/restores' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}"
+
Get Restore by ID
curl --location 'https://api.skysql.com/skybackup/v1/restores/12' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}"
+
A typical response from these two APIs looks like the following. If the restore is still in progress:
[
+    {
+        "id": 12,
+        "service_id": "dbtgf28216706",
+        "bucket": "gs://sky-syst0000-backup-us-84e9d84ecf265a/orgpxw1x",
+        "key": "eda3b72460c8c0d9d61a7f01b6a22e32:dbtgf28216706:tx-filip-mdb-ms-0",
+        "type": "physical",
+        "status": "Running",
+        "message": "server is not-ready"
+    }
+]
+
If the restore has completed:
[
+    {
+        "id": 13,
+        "service_id": "dbtgf28216706",
+        "bucket": "gs://sky-syst0000-backup-us-84e9d84ecf265a/orgpxw1x",
+        "key": "dda9b72460c9c0d9d61a7f01b6a33e39:dbtgf28216706:tx-filip-mdb-ms-0",
+        "type": "physical",
+        "status": "Succeeded",
+        "message": "Restore has succeeded!"
+    }
+]
+
+ + + + + + + + + + + + + +

+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Restore from SkySQL Managed Storage/index.html b/Backup and Restore/Restore from SkySQL Managed Storage/index.html new file mode 100644 index 00000000..2e2af1a8 --- /dev/null +++ b/Backup and Restore/Restore from SkySQL Managed Storage/index.html @@ -0,0 +1,2630 @@ + + + + + + + + + + + + + + + + + + + + + + + Restore from SkySQL Managed Storage - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Restore from SkySQL Managed Storage

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+ +

You can restore your database from the backup located in the default SkySQL managed backup storage. Below is a sample restore call.

+

Restore From SkySQL Managed Storage

+
curl --location 'https://api.skysql.com/skybackup/v1/restores' \
+--header 'Content-Type: application/json' \
+--header 'Accept: application/json' \
+--header "X-API-Key: ${API_KEY}" \
+--data '{
+  "key": "xxx:dbtgf28044362:xxx",
+  "service_id": "<SERVICE_ID>"
+}'
+
+
    +
  • +

    SERVICE_ID : SkySQL service identifier, format dbtxxxxxx. You can fetch your service ID from the fully qualified domain name (FQDN) of your service.
    + E.g.: in dbpgf17106534.sysp0000.db2.skysql.com, 'dbpgf17106534' is the service ID. You will find the FQDN in the Connect window.

    +
  • +
  • +

    KEY : the SkySQL backup key. To get the backup key, you can use the following API call:

    +
  • +
+

curl --location 'https://api.skysql.com/skybackup/v1/backups?service_id=<SERVICE_ID>' \
+  --header 'Accept: application/json' \
+  --header "X-API-Key: ${API_KEY}"
+
+Key format: \w*SERVICE_ID\w*, where \w* matches zero or more word characters (letters, digits, or underscore).

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/Snapshot Backup Examples/index.html b/Backup and Restore/Snapshot Backup Examples/index.html new file mode 100644 index 00000000..61e6052d --- /dev/null +++ b/Backup and Restore/Snapshot Backup Examples/index.html @@ -0,0 +1,2715 @@ + + + + + + + + + + + + + + + + + + + + + + + Snapshot Backup Examples - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Snapshot Backup Examples

+ +
+ +Authentication + +

+
    +
  1. +Go to the SkySQL API Key management page and generate an API key +
  2. +
  3. +Export the value from the token field to an environment variable $API_KEY + +
    export API_KEY='... key data ...'
    +
    +
  4. +
  5. +Use it on subsequent request, e.g: + +
    curl --request GET 'https://api.skysql.com/skybackup/v1/backups/schedules' --header "X-API-Key: ${API_KEY}"
    +
    +
  6. +
+

+ +

Snapshot Backup Scheduling

+

One-time Snapshot Example

+
    curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+    --header 'Content-Type: application/json' \
+    --header 'Accept: application/json' \
+    --header "X-API-Key: $API_KEY" \
+    --data "{
+        \"backup_type\": \"snapshot\",
+        \"schedule\": \"once\",
+        \"service_id\": \"$SERVICE_ID\"
+        }"
+
+
    +
  • API_KEY : SKYSQL API KEY, see SkySQL API Keys
  • +
  • SERVICE_ID : SkySQL service identifier, format dbtxxxxxx. You can fetch the service ID from the fully qualified domain name (FQDN) of your service. E.g.: in dbpgf17106534.sysp0000.db2.skysql.com, 'dbpgf17106534' is the service ID. You will find the FQDN in the Connect window.
  • +
+

Cron Snapshot Example

+
    curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
+    --header 'Content-Type: application/json' \
+    --header 'Accept: application/json' \
+    --header "X-API-Key: $API_KEY" \
+    --data "{
+    \"backup_type\": \"snapshot\",
+    \"schedule\": \"0 3 * * *\",
+    \"service_id\": \"$SERVICE_ID\"
+    }"
+
+
    +
  • API_KEY : SKYSQL API KEY, see SkySQL API Keys
  • +
  • SCHEDULE : Cron schedule, see Cron
  • +
  • SERVICE_ID : SkySQL service identifier, format dbtxxxxxx
  • +
+
Backup status can be fetched using 'https://api.skysql.com/skybackup/v1/backups'. See the 'Backup Status' section for an example.
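For reference, a status listing request follows the same pattern as the other GET examples in this documentation; substitute your own service ID and API key:

curl --location 'https://api.skysql.com/skybackup/v1/backups?service_id=<SERVICE_ID>' \
  --header 'Accept: application/json' \
  --header "X-API-Key: ${API_KEY}"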
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Backup and Restore/index.html b/Backup and Restore/index.html new file mode 100644 index 00000000..b4fb1cf7 --- /dev/null +++ b/Backup and Restore/index.html @@ -0,0 +1,2961 @@ + + + + + + + + + + + + + + + + + + + + + + + Backup and Restore - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Backup and Restore

+

Pricing

+
+

While daily automated backups are provided, using the SkySQL Backup and Restore API may incur nominal additional charges. For more information, please contact info@skysql.com.

+
+

The following documentation describes the API for the SkySQL Backup Service. This can be used directly with any HTTP client.

+

The Backup and Restore service provides SkySQL customers with a comprehensive list of features through a secure API and a user-friendly portal. The service extends the automated nightly backups with a number of self-service features. Users can automatically create and store backups of their databases to ensure additional data safety or provide a robust disaster recovery solution. The backups are stored on reliable and secure cloud storage, ensuring they are readily available when needed. The backup process is seamless and does not affect database performance. SkySQL also offers the flexibility to customize the backup schedule according to your specific needs. Backups of large data sets can take time.

+

You instruct the creation of a backup using a "schedule". You can either schedule a one-time backup (schedule now) or set up automatic backups using a cron schedule. A backup schedule results in a backup job which can be tracked using the status API. We support the following types of backups: snapshot, full (physical), incremental (physical), binary log, and dump (logical).
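For example, a one-time full backup can be requested with a single call to the schedules endpoint. This is a sketch assembled from the schedule examples elsewhere in this documentation; replace <SERVICE_ID> and the API key with your own values:

curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
  --header 'Content-Type: application/json' \
  --header 'Accept: application/json' \
  --header "X-API-Key: ${API_KEY}" \
  --data '{
      "backup_type": "full",
      "schedule": "once",
      "service_id": "<SERVICE_ID>"
  }'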

+

Backup

+

SkySQL Snapshot Backups

+
+ +Overview + + +

+
  • +SkySQL database snapshots create a point-in-time copy of the database persistent volume. Compared to full backups, snapshots provide a faster method for restoring your database with the same data. +
  • + +
  • +Snapshots are incremental in nature. After the initial full snapshot of a database persistent volumes, subsequent snapshots only capture and store the changes made since the last snapshot. This approach saves a lot of storage space and reduces the time it takes to create a snapshot database backup and the related cloud storage cost. +
  • +
  • +Users have the flexibility to trigger a snapshot as per their scheduling requirements - either on-demand or according to a pre-defined schedule. +
  • +
  • +The SkySQL snapshots benefit from MariaDB's [backup stage flush](https://mariadb.com/kb/en/backup-stage/#:~:text=active%20DDL%20commands.-,BACKUP%20STAGE%20FLUSH,as%20closed%20for%20the%20backup.) to create a consistent backup of the database - database lock temporarily suspends write operations and replication for just a few seconds. In a Primary/Replica topology, snapshot backups are prioritized and performed on the replica node. This is to ensure that the primary server can continue to operate in read/write mode, as the backup process is carried out on the replica node. After the backup process on the replica is completed, replication resumes automatically. +
  • +

    +
    + +
    Snapshot Backup Examples
    +

    SkySQL supports database snapshot backups either on-demand or according to a pre-established schedule. +Below are examples of how to schedule a snapshot backup using the SkySQL API.

    + +

    Important: Database snapshots are deleted immediately upon service deletion.

    +
    + +References + +

    + +

    +
    + +

    Full (physical) Backups

    +
    + +Overview + + +

    +
  • +Full backups create a complete backup of the database server into a new backup folder. It uses [mariabackup](https://mariadb.com/kb/en/full-backup-and-restore-with-mariabackup/) under the hood. Physical backups are performed by copying the individual data files or directories. +
  • + +
  • +The physical backup uses backup stages to create a consistent backup of the database without requiring a global read lock for the entire duration of the backup, while allowing the database to continue processing transactions. Instead, the server read lock is only needed briefly during the [BACKUP STAGE FLUSH](https://mariadb.com/kb/en/backup-stage/#:~:text=active%20DDL%20commands.-,BACKUP%20STAGE%20FLUSH,as%20closed%20for%20the%20backup.) stage, which flushes the tables to ensure that all of them are in a consistent state at the exact same point in time, independent of storage engine. The database lock temporarily suspends write operations and replication; the duration of the lock is typically just a few seconds. In a Primary/Replica topology, backups are prioritized and performed on the replica node. This approach ensures that the primary server can continue to operate in read/write mode, as the backup process is carried out on the replica node. After the backup process on the replica is completed, replication resumes automatically. +
  • +

    +
    +

    Full (physical) Backup Examples

    +

    SkySQL supports database physical backups either on-demand or according to a pre-established schedule. Below are examples of how to schedule a physical backup using the SkySQL API.

    + +
    + +References + +

    + +

    +
    + +

    Incremental Backups

    +
    + +Overview + + +

    +Incremental backups update a previous backup with any changes to the data that have occurred since the initial backup was taken. + +InnoDB pages contain log sequence numbers, or LSNs. Whenever you modify a row on any InnoDB table in the database, the storage engine increments this number. When performing an incremental backup, mariabackup checks the most recent LSN for the backup against the LSNs contained in the database. It then updates any of the backup files that have fallen behind. +

    +
    + +
    Incremental Backup Examples
    +

    SkySQL supports database incremental backups either on-demand or according to a pre-established schedule. +Below are examples of how to schedule an incremental backup using the SkySQL API.
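    As an illustration, a nightly incremental backup schedule might be requested as follows. This is a sketch based on the schedule format used elsewhere in this documentation; the "incremental" backup_type value is an assumption, so check the dedicated examples page for the exact value:

    curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
      --header 'Content-Type: application/json' \
      --header 'Accept: application/json' \
      --header "X-API-Key: ${API_KEY}" \
      --data '{
          "backup_type": "incremental",
          "schedule": "0 4 * * *",
          "service_id": "<SERVICE_ID>"
      }'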

    + +

    Logical (Mariadb-dump) Backups

    +
    + +Overview + + +

    +Logical backups consist of the SQL statements necessary to restore the data, such as CREATE DATABASE, CREATE TABLE, and INSERT. This is done using mariadb-dump ([mariadb-dump](https://mariadb.com/kb/en/mariadb-dump/)) and is the most flexible way to perform a backup and restore, and a good choice when the data size is relatively small. +

    +
    + +

    Logical Backup Examples

    +

    SkySQL supports database logical backups either on-demand or according to a pre-established schedule. Below are examples of how to schedule a logical backup using the SkySQL API.

    + +
    + +References + + +

    +[mariadb-dump](https://mariadb.com/kb/en/mariadb-dump/) + +

    +
    + +

    BinaryLog Backups

    +
    + +Overview + + +

    +Binlogs record database changes (data modifications, table structure changes) in a sequential, binary format. You can preserve binlogs for setting up replication or to recover to a certain point-in-time. + +

    +
    + +

    BinaryLog Backup Examples

    + +

    Additional Backup Options (with Examples)

    +
      +
    • Replication as Backup : In situations where the service cannot be locked or stopped, or is under heavy load, performing backups directly on a primary server may not be the preferred option. Using a replica database instance for backups allows the replica to be shut down or locked, enabling backup operations without impacting the primary server. + + The approach is commonly implemented in the following manner: + - The primary server replicates data to a replica. + - Backups are then initiated from the replica, ensuring no disruption to the primary server. + + Details on how to set up replication with your SkySQL instance can be found [here](../Data%20loading%2C%20Migration/Replicating%20data%20from%20external%20DB/). +
    • +
    • Automatic Nightly Backups : Automated nightly backups include a full backup of every database in the service to ensure that your SkySQL Database service is backed up regularly. Nightly backups are running for every SkySQL database by default. +
    • +
    • Bring Your Own Bucket (BYOB) : You can backup or restore data to/from your own bucket in either GCP or AWS. Sample GCP and AWS scripts can be found [here](../Backup%20and%20Restore/Bring%20Your%20Own%20Bucket%20Examples/). +
    • +
    • Point-in-time Recovery : You can restore from a full or a logical backup and then use a binlog backup to restore to a point-in-time. +
    • +
    • Secure Backup/Restores : Control backup/restore privileges by granting roles to users in SkySQL. +
    • + +
    • Other Backup API Examples : Various API scripts providing examples of listing backups, checking backup statuses, and working with backup schedules can be found [here](../Backup%20and%20Restore/Other%20backup%20API%20examples/). +
    • +
    + +

    Restores

    +

    WARNING

    +
    +

    Restoring from a backup will erase all data in your target DB service. If you are uncertain, it is advisable to first create a backup of the DB service before initiating the restore process. Consider restoring to a new database instance as a preferred approach. The database being restored will be temporarily stopped during the restoration.

    +
    +

    Users can instruct the restore of their SkySQL Database from their own SkySQL storage or from an external storage they own. The restore API provides options for listing, adding, and deleting a scheduled restore operation.

    +

    List Restore Schedules

    +

    SkySQL Users can fetch their already existing database restore schedules using the backup API. Check the provided API examples for details.

    +

    Restore List Examples

    + +

    Create a Restore

    +

    SkySQL Users can restore their databases using their own SkySQL managed backup storage or using an external storage they own. Check the provided service API examples for details.

    +

    Database Restore Examples

    + +

    Delete Restore Schedule

    +

    SkySQL Users can delete their already defined database restore schedules with the provided service API.

    +

    Delete Restore Examples

    + + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Billing and Power Tier/Maintenance Windows/index.html b/Billing and Power Tier/Maintenance Windows/index.html new file mode 100644 index 00000000..59234a59 --- /dev/null +++ b/Billing and Power Tier/Maintenance Windows/index.html @@ -0,0 +1,2548 @@ + + + + + + + + + + + + + + + + + + + Maintenance Windows - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Maintenance Windows

    +

    Maintenance windows are a scheduled period of time when hardware, network, software, or configuration changes can be applied, and processes can be restarted.

    +

    For Foundation Tier customers, maintenance windows are predefined by region.

    +

    For Power Tier customers, maintenance windows are customer-selected.

    +

    Notice is provided to customers in advance of maintenance. SkyDBA customers are asked to confirm maintenance prior to scheduled start.

    +

    View Current Maintenance Window

    +

    For Foundation Tier, to view the maintenance window, go to "Your services" and then "Details".

    +

    On Power Tier, to show the current maintenance window for a service:

    +
      +
    1. +

      Go to "Your services" page (top choice in left navigation).

      +
    2. +
    3. +

      Click the ">" to the left of the desired service.

      +
    4. +
    5. +

      The current maintenance window is shown in the "Customization" section.

      +
    6. +
    +

    Choose Maintenance Window

    +

    A maintenance window selection applies to all services within a region.

    +

    For Power Tier customers, maintenance windows are customer-selected:

    +
      +
    • +

      A menu of supported maintenance windows is presented at time of service launch.

      +
    • +
    • +

      Maintenance windows may be changed after service launch, once a service reaches "Healthy" state.

      +
    • +
    +

    Maintenance windows can be updated at time of service launch, or on-demand:

    +
      +
    1. +

      Go to "Your services" page (top choice in left navigation).

      +
    2. +
    3. +

      Click the ">" to the left of the desired service.

      +
    4. +
    5. +

      The current maintenance window is shown in the "Customization" section.

      +
    6. +
    7. +

      Click on the triangle at the right of the "Maintenance window" to see a drop down display of available maintenance windows.

      +
    8. +
    9. +

      Select the desired maintenance window and click on the "Save" button. This change will be applied to all services in the region.

      +
    10. +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Billing and Power Tier/Maxscale Redundancy/index.html b/Billing and Power Tier/Maxscale Redundancy/index.html new file mode 100644 index 00000000..bab3f5c9 --- /dev/null +++ b/Billing and Power Tier/Maxscale Redundancy/index.html @@ -0,0 +1,2533 @@ + + + + + + + + + + + + + + + + + + + MaxScale Redundancy - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    MaxScale Redundancy

    +

    MariaDB MaxScale serves as the load balancer in certain SkySQL topologies.

    +

    SkySQL supports MaxScale Redundancy as an option at time of launch:

    +
      +
    • +

      This feature is not enabled by default. By default, topologies that use MaxScale contain only one MaxScale node.

      +
    • +
    • +

      When MaxScale Redundancy is selected, MaxScale nodes are deployed in a highly available (HA) active-active configuration behind round robin load balancing.

      +
    • +
    • +

      When MaxScale Redundancy is enabled, MaxScale instance size can be selected.

      +
    • +
    • +

      MaxScale Redundancy is available to Power Tier customers.

      +
    • +
    +

    Compatibility

    +
      +
    • Replicated Transactions
    • +
    +

    Enable MaxScale Redundancy

    +
      +
    1. +

      Launch a SkySQL service:

      +
    2. +
    3. +

      Check the "Enable MaxScale Redundancy" checkbox.

      +
    4. +
    5. +

      Choose the MaxScale instance size

      +
    6. +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Billing and Power Tier/Pricing/index.html b/Billing and Power Tier/Pricing/index.html new file mode 100644 index 00000000..65532112 --- /dev/null +++ b/Billing and Power Tier/Pricing/index.html @@ -0,0 +1,2535 @@ + + + + + + + + + + + + + + + + + + + Pricing - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Pricing

    +

    SkySQL pricing information is shown at time of service launch.

    +

    Pricing shown within the SkySQL interface is an estimate of the cost of using SkySQL services based on your specified usage parameters. Pricing information is shown at time of service launch and subsequently as estimated charges.

    +

    The cost shown is not a quote and does not guarantee the cost for your actual use of SkySQL services.

    +

    Estimated Pricing

    +

    The cost estimate may vary from your actual costs for several reasons, including:

    +
      +
    • +

      Actual usage: Your actual cost will be based on your actual use of the services, not the estimate.

      +
    • +
    • +

      Region: The prices for SkySQL services may vary between regions. Using a different region than the one selected may affect the results.

      +
    • +
    • +

      Price changes: On-demand pricing for most services may change over time. If you buy services on-demand, your bill may be less or more than estimated based on the current on-demand rates.

      +
    • +
    • +

      Taxes: The estimate does not include any taxes that may be applied to your purchase of the services.

      +
    • +
    • +

      Time frame assumptions: On-demand monthly pricing assumes that your instance or node runs for a 730 hour month. The estimate does not account for leap years, which add one additional day (24 hours).

      +
    • +
    • +

      Promotional credits and discounts: The estimate does not account for promotional credits or other discounts.

      +
    • +
    • +

      Monthly billing period: MariaDB bills on a monthly basis. If your utilization starts mid-month, you will only see a portion of an actual month's full costs on your invoice.

      +
    • +
    • +

      Rounding: Estimated fees include mathematical rounding of pricing data.

      +
    • +
    • +

      Scale fabric: Scale fabric costs represent the additional network, host, and backup infrastructure needed to support multi-node topologies. In addition to multi-node topologies, this cost also applies to the Distributed Transactions topology when deployed with 1 Xpand node, since infrastructure is present to support scale-up to a multi-node configuration.

      +
    • +
    • +

      Previous services: The estimate is only for the service being launched and does not account for other current or previous charges to the SkySQL account.

      +
    • +
    • +

      Cross-Region Replicas: The estimate does not include cross-region replicas.

      +
    • +
    • +

      Currency: Estimates are provided in either US dollars or Euros depending on your account address country. Your actual cost will be based on US dollar pricing with a conversion to Euros depending on your account address country.

      +
    • +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Billing and Power Tier/index.html b/Billing and Power Tier/index.html new file mode 100644 index 00000000..d2abda9c --- /dev/null +++ b/Billing and Power Tier/index.html @@ -0,0 +1,2564 @@ + + + + + + + + + + + + + + + + + + + + + + + Billing and Power Tier - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Billing and Power Tier

    +
    +

    Note

    +

    COMING SOON … a complete feature checklist of both tiers ... +A full description of billing and invoicing (the FAQ covers a bit)

    +
    +

    Power Tier is a premium service offering for customers who have the most critical requirements for uptime, availability, performance, and support.

    +

    Upgrade to Power Tier

    +

    By default, newly signed-up users are in the "Foundation Tier". To upgrade to Power Tier, simply click the "Upgrade" button - SkySQL support will contact you and start the upgrade process. You can also reach out to SkySQL Support directly.

    +

    Features

    +

    Features available to SkySQL Power Tier customers include:

    + + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Connecting to Sky DBs/Connect from Java App/index.html b/Connecting to Sky DBs/Connect from Java App/index.html new file mode 100644 index 00000000..15694ef2 --- /dev/null +++ b/Connecting to Sky DBs/Connect from Java App/index.html @@ -0,0 +1,2612 @@ + + + + + + + + + + + + + + + + + + + + + + + Connect from Java App - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Connect from Java App

    +

    MariaDB Connector/J enables Java applications to connect to SkySQL using a native MariaDB connector.

    +

    Install MariaDB Connector/J via JAR

    +

    To download the JAR file manually:

    +
      +
    1. Go to the MariaDB Connector/J download page
    2. +
    3. Within the "Product" dropdown, choose the "Java 8+ connector".
    4. +
    5. In the "Version" dropdown, choose the desired version.
    6. +
    7. Click the "Download" button to download the JAR file.
    8. +
    9. When the JAR file finishes downloading, place it into the relevant directory on your system.
    10. +
    11. Similarly, install dependency JAR files, if any used.
    12. +
    +

    Install MariaDB Connector/J via Maven

    +

    Maven can install MariaDB Connector/J as a dependency of your application during build. Set the <version> element to correspond to the version of MariaDB Connector/J that you would like to install.

    +

    To use Maven to install MariaDB Connector/J, add the dependency to your pom.xml file:

    +
    <dependency>
    +   <groupId>org.mariadb.jdbc</groupId>
    +   <artifactId>mariadb-java-client</artifactId>
    +   <version>3.4.1</version>
    +</dependency>
    +
    +

    For additional information on available releases, see the "Release Notes for MariaDB Connector/J".

    +

    Depending on the features you plan to use, you may need to add some additional dependencies to pom.xml.

    +

    If you downloaded the connector JAR, place it on your CLASSPATH:

    +
    export CLASSPATH="/path/to/application:/path/to/mariadb-java-client-3.4.1.jar"
    +
    +

    Connector/J 3.0

    +

    In MariaDB Connector/J 3.0, TLS is enabled for connections to SkySQL using the sslMode parameter.

    +
    import java.sql.*;
    +import java.util.Properties;
    +
    +public class App {
    +    public static void main(String[] argv) {
    +        Properties connConfig = new Properties();
    +        connConfig.setProperty("user", "db_user");
    +        connConfig.setProperty("password", "db_user_password");
    +        connConfig.setProperty("sslMode", "verify-full");
    +
    +        try (Connection conn = DriverManager.getConnection("jdbc:mariadb://HOST:PORT", connConfig)) {
    +            try (Statement stmt = conn.createStatement()) {
    +                try (ResultSet contact_list = stmt.executeQuery("SELECT first_name, last_name, email FROM test.contacts")) {
    +                    while (contact_list.next()) {
    +                        System.out.println(String.format("%s %s <%s>",
    +                            contact_list.getString("first_name"),
    +                            contact_list.getString("last_name"),
    +                            contact_list.getString("email")));
    +                    }
    +                }
    +            }
    +        } catch (Exception e) {
    +            e.printStackTrace();
    +        }
    +    }
    +}
    +
    +
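    Equivalently, the same options can be supplied as JDBC URL parameters instead of a Properties object. A minimal sketch follows; HOST, PORT, the database name, and the credentials are placeholders, and sslMode as a URL parameter applies to Connector/J 3.x:

    // Connect using URL parameters only (placeholder host, port, and credentials)
    Connection conn = DriverManager.getConnection(
        "jdbc:mariadb://HOST:PORT/test?sslMode=verify-full&user=db_user&password=db_user_password");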

    Connector/J 2.7

    +

    In MariaDB Connector/J 2.7 and before, TLS is enabled for connections to SkySQL using the useSsl parameter.

    +
    import java.sql.*;
    +import java.util.Properties;
    +
    +public class App {
    +    public static void main(String[] argv) {
    +        Properties connConfig = new Properties();
    +        connConfig.setProperty("user", "db_user");
    +        connConfig.setProperty("password", "db_user_password");
    +        connConfig.setProperty("useSsl", "true");
    +
    +        try (Connection conn = DriverManager.getConnection("jdbc:mariadb://HOST:PORT", connConfig)) {
    +            try (Statement stmt = conn.createStatement()) {
    +                try (ResultSet contact_list = stmt.executeQuery("SELECT first_name, last_name, email FROM test.contacts")) {
    +                    while (contact_list.next()) {
    +                        System.out.println(String.format("%s %s <%s>",
    +                            contact_list.getString("first_name"),
    +                            contact_list.getString("last_name"),
    +                            contact_list.getString("email")));
    +                    }
    +                }
    +            }
    +        } catch (Exception e) {
    +            e.printStackTrace();
    +        }
    +    }
    +}
    +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Connecting to Sky DBs/Connect from MongoDB clients/index.html b/Connecting to Sky DBs/Connect from MongoDB clients/index.html new file mode 100644 index 00000000..de627260 --- /dev/null +++ b/Connecting to Sky DBs/Connect from MongoDB clients/index.html @@ -0,0 +1,2549 @@ + + + + + + + + + + + + + + + + + + + + + + + Connect from MongoDB clients - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Connect from MongoDB clients

    +

    The NoSQL protocol module allows a MariaDB server or cluster to execute transactions for applications using MongoDB client libraries, transparently converting MongoDB API calls into the equivalent SQL. The MariaDB responses are then converted into the format expected by the MongoDB® client library and application.

    +

    For detailed information on supported commands, see "NoSQL Protocol Module" in MariaDB MaxScale documentation.

    + + +

    Enable Support for NoSQL

    +
      +
    1. When launching Enterprise Server With Replica(s), after defining the service name, expand the "Additional options" section.
    2. +
    3. Check the "Enable support for NoSQL" checkbox.
    4. +
    +

    Available Clients

    +

    Connect to the NoSQL interface using a MongoDB client library or compatible application. Documentation on official MongoDB libraries is available from MongoDB.

    +

    Documentation on installing mongosh (the MongoDB Shell) is available from MongoDB.

    +

    Connection Parameters

    +

    From the Dashboard, the details needed to connect to your SkySQL service can be seen by clicking on the "CONNECT" button for the desired service.

    +

    The "NoSQL port" is the TCP port used to connect to the NoSQL interface.

    +

    The firewall must be configured to allowlist the client's IP address or netblock before connections can occur.

    +

    See the "Connecting using Mongosh" section of the Connect page for an example mongosh command-line, authentication instructions, and instructions to change the default password.
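    As a rough illustration only, a mongosh connection string typically has the following shape; the user, password, host, and NoSQL port below are placeholders, and the exact command for your service is shown in the Connect window:

    mongosh "mongodb://db_user:db_user_password@<FQDN>:<NoSQL_PORT>/?tls=true"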

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Connecting to Sky DBs/Connect from Node js App/index.html b/Connecting to Sky DBs/Connect from Node js App/index.html new file mode 100644 index 00000000..ae1ca962 --- /dev/null +++ b/Connecting to Sky DBs/Connect from Node js App/index.html @@ -0,0 +1,2761 @@ + + + + + + + + + + + + + + + + + + + + + + + Connect from Node.js App - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Connect from Node.js App

    +

    Node.js developers can connect to SkySQL through a native MariaDB Connector. Using MariaDB Connector/Node.js you can connect to SkySQL to use and administer databases from within your Node.js application.

    +

    Install MariaDB Connector/Node.js

    +

    MariaDB Connector/Node.js is usually installed either from the Node.js repository or manually from the source code package.

    +

    Install MariaDB Connector/Node.js via Repository

    +

    To install MariaDB Connector/Node.js from the Node.js repository, use NPM:

    +
    npm install mariadb
    +
    +

    NPM connects to the Node.js repository and downloads MariaDB Connector/Node.js and all relevant dependencies into the node_modules/ directory.

    +

    Install MariaDB Connector/Node.js via Source Code

    +

    To download and install the MariaDB Connector/Node.js manually from source code:

    +
      +
    1. Go to the MariaDB Connectors download page: +
    2. +
    3. In the "Product" dropdown, select the Node.js connector.
    4. +
    5. Click the "Download" button to download the source code package
    6. +
    7. +

      When the source code package finishes downloading, install it with NPM:

      +

      $ npm install mariadb-connector-nodejs-*.tar.gz

      +
    8. +
    +

    NPM untars the download and installs MariaDB Connector/Node.js in the node_modules/ directory.

    +
    +

    Connect with MariaDB Connector/Node.js (Callback API)

    +

    Node.js developers can use MariaDB Connector/Node.js to establish client connections with SkySQL.

    +

    Require Callback API

    +

    MariaDB Connector/Node.js provides two different connection implementations: one built on the Promise API and the other built on the Callback API.

    +

    To use the Callback API, use the following module:

    +
    const mariadb = require('mariadb/callback');
    +
    +

    Connect

    +

    createConnection(options) -> Connection is the base function used to create a Connection object.

    +

    The createConnection(options) function returns a Connection object.

    +

    Determine the connection information for your SkySQL database service:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    OptionDescription
    hostThe fully Qualified Domain Name from the "Connect" window in SkySQL portal
    portThe Read-Write Port or Read-Only Port from the "Connect" window in SkySQL portal
    userThe desired username, which might be the default username in the Service Credentials view
    passwordThe user's password, which might be the default password in the Service Credentials view if it was not yet customized
    databaseDatabase name to establish a connection to. No default is configured.
    connectTimeoutConnection timeout in milliseconds. In Connector/Node.js 2.5.6, the default value changed to 1000. The default value for earlier versions is 10000.
    rowsAsArrayA boolean value to indicate whether to return result sets as array instead of the default JSON. Arrays are comparatively faster.
    +

    Code Example: Connect

    +

    The following code example connects using the database and user account created in the example setup:

    +
    const mariadb = require('mariadb/callback');
    +const fs = require('fs');
    +
    +// Certificate Authority (CA)
    +var serverCert = [fs.readFileSync(process.env.SKYSQL_CA_PEM, "utf8")];
    +
    +// Declare function
    +function main() {
    +   let conn;
    +
    +   try {
    +      conn = mariadb.createConnection({
    +         host: "example.skysql.com",
    +         port: 5009,
    +         ssl: { ca: serverCert },
    +         user: "db_user",
    +         password: "db_user_password",
    +         database: "test",
    +      });
    +
    +      // Use Connection
    +      // ...
    +   } catch (err) {
    +      // Manage Errors
    +      console.log("SQL error in establishing a connection: ", err);
    +   } finally {
    +      // Close Connection
    +      if (conn) conn.end(err => {if(err){
    +         console.log("SQL error in closing a connection: ", err);}
    +      });
    +   }
    +}
    +
    +main();
    +
    +
      +
  • A try...catch...finally statement is used for exception handling.
  • +
  • New connections are created in auto-commit mode by default.
  • +
  • When you are done with a connection, close it to free resources. Close the connection using the connection.end([callback]) function.
  • +
  • The script calls the connection.end([callback]) function to close the connection in the finally block after any running queries have completed.
  • +
  • The end() function takes a callback function whose single implicit argument is the Error object thrown while closing the connection. If no error occurs when closing the connection, the Error object is null.
    • +
    +
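Where the example above says "Use Connection", a query can be issued through the Callback API roughly as follows. This is a minimal sketch; the test.contacts table and its columns are assumptions used only for illustration:

// Use Connection
conn.query("SELECT first_name, last_name, email FROM test.contacts", (err, rows) => {
   if (err) {
      // Manage query errors
      console.log("SQL error in executing the query: ", err);
      return;
   }
   // Print each row returned by the query
   rows.forEach((row) => {
      console.log(`${row.first_name} ${row.last_name} <${row.email}>`);
   });
});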
    +

    Connect with MariaDB Connector/Node.js (Promise API)

    +

    Node.js developers can use MariaDB Connector/Node.js to establish client connections with SkySQL.

    +

    Require Promise API

    +

    MariaDB Connector/Node.js provides two different connection implementations: one built on the Promise API and the other built on the Callback API. Promise is the default.

    +

    To use the Promise API, use the mariadb module:

    +
    const mariadb = require('mariadb');
    +
    +

    Connect

    +

    createConnection(options) -> Promise is the base function used to create a Connection object.

    +

    The createConnection(options) function returns a Promise that resolves to a Connection object if no error occurs, and rejects with an Error object if an error occurs.

    +

    Determine the connection information for your SkySQL database service:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    OptionDescription
    hostThe fully Qualified Domain Name from the "Connect" window in SkySQL portal
    portThe Read-Write Port or Read-Only Port from the "Connect" window in SkySQL portal
    userThe desired username, which might be the default username in the Service Credentials view
    passwordThe user's password, which might be the default password in the Service Credentials view if it was not yet customized
    databaseDatabase name to establish a connection to. No default is configured.
    connectTimeoutConnection timeout in milliseconds. In Connector/Node.js 2.5.6, the default value changed to 1000. The default value for earlier versions is 10000.
    rowsAsArrayA boolean value to indicate whether to return result sets as array instead of the default JSON. Arrays are comparatively faster.
    +

    Create a file named .env to store your database credentials:

    +
    MDB_HOST = example.skysql.com
    +MDB_PORT = 5001
    +MDB_CA_PEM = /path/to/skysql_chain.pem
    +MDB_USER = db_user
    +MDB_PASS = db_user_password
    +
    +

    Code Example: Connect

    +

    The following code example connects using the database and user account created in Setup for Examples:

    +
    // Required Modules
    +const fs = require("fs");
    +const mariadb = require("mariadb");
    +require("dotenv").config()
    +
    +// Certificate Authority (CA)
    +const serverCert = [fs.readFileSync(process.env.MDB_CA_PEM, "utf8")];
    +
    +// Declare async function
    +async function main() {
    +   let conn;
    +
    +   try {
    +      conn = await mariadb.createConnection({
    +         host: process.env.MDB_HOST,
    +         port: process.env.MDB_PORT,
    +         user: process.env.MDB_USER,
    +         password: process.env.MDB_PASS,
    +         ssl: { ca: serverCert },
    +         database: "test",
    +      });
    +
    +      // Use Connection
    +      // ...
    +   } catch (err) {
    +      // Manage Errors
    +      console.log("SQL error in establishing a connection: ", err);
    +   } finally {
    +      // Close Connection
    +      if (conn) conn.close();
    +   }
    +}
    +
    +main();
    +
    +
      +
    • Load the mariadb module using the require() function.
    • +
    • Declare an async function called main() using the async keyword.
    • +
    • An async function provides asynchronous, Promise-based code behavior.
    • +
    • Async functions may declare await expressions using the await keyword.
    • +
    • Await expressions yield control to a promise-based asynchronous operation.
    • +
    • Await expressions resume control after the awaited operation is either fulfilled or rejected.
    • +
    • The return value of an await expression is the resolved value of the Promise.
    • +
    • The async function name main is arbitrary and does not have special meaning as in some other programming languages.
    • +
    • Declare a variable called conn for the connection to be created using a let statement within the async function main.
    • +
    • A try...catch...finally statement is used for exception handling.
    • +
    • New connections are by default created in auto-commit mode.
    • +
    • In the try block, create a new connection using the mariadb#createConnection(options) function in the Promise API.
    • +
    • Send any error messages to the console in the catch block.
    • +
    • When you are done with a connection, close it to free resources. Close the connection using the close() function.
    • +
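    Where the example above says "Use Connection", a query can be awaited through the Promise API roughly as follows. This is a minimal sketch; the test.contacts table and its columns are assumptions used only for illustration:

    // Use Connection
    const rows = await conn.query("SELECT first_name, last_name, email FROM test.contacts");
    // Print each row returned by the query
    for (const row of rows) {
       console.log(`${row.first_name} ${row.last_name} <${row.email}>`);
    }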
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Connecting to Sky DBs/Connect from Python App/index.html b/Connecting to Sky DBs/Connect from Python App/index.html new file mode 100644 index 00000000..cdb887a0 --- /dev/null +++ b/Connecting to Sky DBs/Connect from Python App/index.html @@ -0,0 +1,2870 @@ + + + + + + + + + + + + + + + + + + + + + + + Connect from Python App - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Connect from Python App

    +

    Overview

    +

    Python developers can use MariaDB Connector/Python to establish client connections with SkySQL.

    +

    Connections

    +

    Connections are managed using the following Python class:

    + + + + + + + + + + + + + +
    ClassDescription
    ConnectionRepresents a connection to SkySQL.
    +

    Connections are created, used, and managed using the following Connection class functions:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    FunctionDescription
    connect()Establishes a connection to a database server and returns a connection object.
    cursor()Returns a new cursor object for the current connection.
    change_user()Changes the user and default database of the current connection.
    reconnect()Tries to make a connection object active again by reconnecting to the server using the same credentials which were specified in connect() method.
    close()Closes the connection.
    +

    Determine the connection information for your SkySQL database service:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    connect() parameterWhere to find it
    userDefault username in the Service Credentials view, or the username you created
    passwdDefault password in the Service Credentials view, the password you set on the default user, or the password for the user you created
    hostFully Qualified Domain Name in the Connection Parameters Portal
    ssl_verify_certSet to True to support SSL
    portRead-Write Port or Read-Only Port in the Connection Parameters Portal
    +

    Code Example: Connect

    +

    The following code example connects to an example server.

    +

    Examples:

    +
    # Module Import
    +import mariadb
    +import sys
    +
    +# Instantiate Connection
    +try:
    +   conn = mariadb.connect(
    +      host="192.0.2.1",
    +      port=3306,
    +      user="db_user",
    +      password="USER_PASSWORD")
    +except mariadb.Error as e:
    +   print(f"Error connecting to the database: {e}")
    +   sys.exit(1)
    +
    +# Use Connection
    +# ...
    +
    +# Close Connection
    +conn.close()
    +
    +
    # Module Import
    +import mariadb
    +import sys
    +
    +# Instantiate Connection
    +try:
    +   conn = mariadb.connect(
    +      host="SKYSQL_SERVICE.mdb0000001.db.skysql.com",
    +      port=5009,
    +      ssl_verify_cert=True,
    +      user="DB00000001",
    +      password="USER_PASSWORD")
    +except mariadb.Error as e:
    +   print(f"Error connecting to the database: {e}")
    +   sys.exit(1)
    +
    +# Use Connection
    +# ...
    +
    +# Close Connection
    +conn.close()
    +
    +
      +
    • The connect() function returns an instance of the Connection class, which is assigned to the conn variable.
    • +
    • The connection attributes are passed as keyword arguments to the connect() function.
    • +
    • When you are done with a connection, close it to free resources. Close the connection using the close() method.
    • +
    +
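    Once connected, statements are executed through a cursor. A minimal sketch follows; the test.contacts table and its columns are assumptions used only for illustration:

    # Use Connection
    cur = conn.cursor()
    cur.execute("SELECT first_name, last_name, email FROM test.contacts")
    for (first_name, last_name, email) in cur:
        print(f"{first_name} {last_name} <{email}>")
    cur.close()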

    Multiple Connections

    +

    Instantiating the Connection class creates a single connection to MariaDB database products. Applications that require multiple connections may benefit from pooling connections.
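    A minimal sketch of pooling with MariaDB Connector/Python follows; the pool name, pool size, and connection parameters below are placeholders reused from the examples above:

    # Module Import
    import mariadb

    # Create a small pool of connections (connection parameters as in connect())
    pool = mariadb.ConnectionPool(
        pool_name="skysql_pool",
        pool_size=5,
        host="SKYSQL_SERVICE.mdb0000001.db.skysql.com",
        port=5009,
        ssl_verify_cert=True,
        user="DB00000001",
        password="USER_PASSWORD")

    # Borrow a connection from the pool
    conn = pool.get_connection()

    # Use Connection
    # ...

    # Return the connection to the pool
    conn.close()

    # Close the pool when the application shuts down
    pool.close()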

    +

    Close a Connection

    +

    MariaDB Connector/Python closes the connection as part of the class's destructor, which is executed when an instance of the class goes out of scope. This can happen in many cases, such as:

    +
      +
    • When the program exits
    • +
    • When the instance of the Connection class is defined in the local scope of a function, and the function returns
    • +
    • When the instance of the Connection class is defined as an attribute of a custom class's instance, and the custom class's instance goes out of scope.
    • +
    +

    Connections can also be explicitly closed using the close() method, which is helpful when the connection is no longer needed, but the variable is still in scope.

    +
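A minimal sketch (reusing the placeholder connection parameters from the earlier examples) that guarantees the connection is explicitly closed even if an error occurs while it is in use:

# Module Import
import mariadb
import sys

# Instantiate Connection (placeholder parameters)
try:
   conn = mariadb.connect(
      host="192.0.2.1",
      port=3306,
      user="db_user",
      password="USER_PASSWORD")
except mariadb.Error as e:
   print(f"Error connecting to the database: {e}")
   sys.exit(1)

try:
   # Use Connection
   # ...
   pass
finally:
   # Explicitly close the connection, even if the code above raised an error
   conn.close()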

    Connection Failover

    +

Starting with MariaDB Connector/Python 1.1, when the connector is built with MariaDB Connector/C 3.3, it supports connection failover when auto_reconnect is enabled and the host argument contains a comma-separated list of server addresses.

    +

    To enable connection failover:

    +
      +
    • Call the mariadb.connect function with the host argument specified as a comma-separated list containing multiple server addresses. The connector attempts to connect to the addresses in the order specified in the list.
    • +
    • Set auto_reconnect to True. If the connection fails, the connector will attempt to reconnect to the addresses in the order specified in the list.
    • +
    +

    The following code example connects with connection failover enabled:

    +
    # Module Import
    +import mariadb
    +import sys
    +
    +# Instantiate Connection
    +try:
    +   conn = mariadb.connect(
    +      host="192.0.2.1,192.0.2.0,198.51.100.0",
    +      port=3306,
    +      user="db_user",
    +      password="USER_PASSWORD")
    +   conn.auto_reconnect = True
    +except mariadb.Error as e:
    +   print(f"Error connecting to the database: {e}")
    +   sys.exit(1)
    +
    +# Use Connection
    +# ...
    +
    +# Close Connection
    +conn.close()
    +
    +
    # Module Import
    +import mariadb
    +import sys
    +
    +# Instantiate Connection
    +try:
    +   conn = mariadb.connect(
    +      host="SKYSQL_SERVICE.mdb0000001.db.skysql.com,SKYSQL_SERVICE.mdb0000002.db.skysql.com",
    +      port=5009,
    +      ssl_verify_cert=True,
    +      user="DB00000001",
    +      password="USER_PASSWORD")
    +   conn.auto_reconnect = True
    +except mariadb.Error as e:
    +   print(f"Error connecting to the database: {e}")
    +   sys.exit(1)
    +
    +# Use Connection
    +# ...
    +
    +# Close Connection
    +conn.close()
    +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git "a/Connecting to Sky DBs/Connect from \342\200\230C++\342\200\231 App/index.html" "b/Connecting to Sky DBs/Connect from \342\200\230C++\342\200\231 App/index.html" new file mode 100644 index 00000000..726e489e --- /dev/null +++ "b/Connecting to Sky DBs/Connect from \342\200\230C++\342\200\231 App/index.html" @@ -0,0 +1,2902 @@ + + + + + + + + + + + + + + + + + + + + + + + Connect from ā€˜C++ā€™ App - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Connect from ā€˜C++ā€™ App

    +

    MariaDB Connector/C++ enables C++ applications to establish client connections to SkySQL over TLS.

    +

    Requirements

    +

MariaDB Connector/C++ depends on MariaDB Connector/C. Install a compatible version of MariaDB Connector/C before using Connector/C++:

    + + + + + + + + + + + + + + + + + +
    MariaDB Connector/C++MariaDB Connector/C
1.1 requires 3.2.3 or later
1.0 requires 3.1.1 or later
    +

    For additional information, see "MariaDB Connector/C++ Release Notes".

    +

    Linux Installation (Binary Tarball)

    +

    To install MariaDB Connector/C++ on Linux:

    +
      +
    1. Install MariaDB Connector/C.
    2. +
    3. Go to theĀ MariaDB Connector C++ download page.
    4. +
    5. In the "OS" dropdown, select the Linux distribution you want to use.
    6. +
    7. Click the "Download" button to download the binary tarball.
    8. +
    9. +

      Extract the tarball:

      +
      tar -xvzf mariadb-connector-cpp-*.tar.gz
      +
      +
    10. +
    11. +

      Change into the relevant directory:

      +
      cd mariadb-connector-cpp-*/
      +
      +
    12. +
    13. +

      Install the directories for the header files:

      +
      sudo install -d /usr/include/mariadb/conncpp
      +sudo install -d /usr/include/mariadb/conncpp/compat
      +
      +
    14. +
    15. +

      Install the header files:

      +
      sudo install include/mariadb/* /usr/include/mariadb/
      +sudo install include/mariadb/conncpp/* /usr/include/mariadb/conncpp
      +sudo install include/mariadb/conncpp/compat/* /usr/include/mariadb/conncpp/compat
      +
      +
    16. +
    17. +

      Install the directories for the shared libraries:

      +
        +
      • +

        On CentOS, RHEL, Rocky Linux:

        +
        sudo install -d /usr/lib64/mariadb
        +sudo install -d /usr/lib64/mariadb/plugin
        +
        +
      • +
      • +

        On Debian, Ubuntu:

        +
        sudo install -d /usr/lib/mariadb
        +sudo install -d /usr/lib/mariadb/plugin
        +
        +
      • +
      +
    18. +
    19. +

      Install the shared libraries:

      +
        +
      • +

        On CentOS, RHEL, Rocky Linux:

        +
        sudo install lib64/mariadb/libmariadbcpp.so /usr/lib64
        +sudo install lib64/mariadb/plugin/* /usr/lib64/mariadb/plugin
        +
        +
      • +
      • +

        On Debian, Ubuntu:

        +
        sudo install lib/mariadb/libmariadbcpp.so /usr/lib
        +sudo install lib/mariadb/plugin/* /usr/lib/mariadb/plugin
        +
        +
      • +
      +
    20. +
    +

    Windows Installation (MSI)

    +

    To install MariaDB Connector/C++ on Windows:

    +
      +
1. The MariaDB Connector/C dependency is installed automatically when Connector/C++ is installed.
    2. +
    3. Go to theĀ MariaDB Connector C++ download page for MS Windows.
    4. +
    5. Click the "Download" button to download the MSI package.
    6. +
    7. Run the MSI package and click "Next" to start the Setup Wizard.
    8. +
    9. On the second screen, click the license agreement checkbox, then click "Next."
    10. +
    11. On the third screen, click "Typical."
    12. +
    13. On the fourth screen, click "Install."
    14. +
    15. Click "Finish."
    16. +
17. Add the directory path that contains the mariadbcpp LIB file (for example, "C:\Program Files\MariaDB\MariaDB C++ Connector 64-bit") to the PATH environment variable (see the example after this list).
    18. +
    +
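For example, assuming the default 64-bit installation path shown above, you could add it to PATH for the current Command Prompt session like this (adjust the path to match your installation):

SET "PATH=C:\Program Files\MariaDB\MariaDB C++ Connector 64-bit;%PATH%"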

For the latest release, visit C & C++ Connectors.

    +

    Connection Info

    +

    The connection is configured via the information that is initially acquired from the SkySQL Portal pages:

    + + + + + + + + + + + + + + + + + + + + + + + + + +
    What to setWhere to find it
Hostname in the URLThe Fully Qualified Domain Name from the "Connect" window in the SkySQL portal
    Port number in the URLThe Read-Write Port or Read-Only Port from the "Connect" window in SkySQL portal
    userĀ parameterThe desired username, which might be the default username in the Service Credentials view
    passwordĀ parameterThe user's password, which might be the default password in the Service Credentials view if it was not yet customized
    +

    Connection URL Syntax

    +

While MariaDB Connector/C++ supports several connection styles, this page details only the JDBC-style URL syntax, since all connections to SkySQL use the same combination of hostname, port, user, password, and SSL parameters.

    +

    The base URL is specified as follows:

    +
    jdbc:mariadb://example.skysql.com:5001/dbname
    +
    +

    If the trailing database name is left off of the URL, the connection will start without selecting a database.

    +

    Optional Connection Parameters

    +

    MariaDB Connector/C++ supports several optional connection parameters. These parameters can be specified using aĀ PropertiesĀ object, as we do in our examples, or appended to the URL in standardĀ name=valueĀ query-string encoding.

    +
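For example, a URL with options appended as a query string might look like the following (the hostname, port, and database name are placeholders; the parameter names are taken from the table below):

jdbc:mariadb://example.skysql.com:5009/dbname?useTls=true&connectTimeout=10000&autoReconnect=true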

    In the following list, we've left out any parameters that aren't pertinent to accessing SkySQL:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Parameter NameDescriptionTypeDefaultAliases
    autoReconnectDefines whether the connector automatically reconnects after a connection failure.boolfalseOPT_RECONNECT
    connectTimeoutDefines the connect timeout value in milliseconds. When set toĀ 0, there is no connect timeout.int30000
    enabledTlsCipherSuitesA list of permitted ciphers or cipher suites to use for TLS.stringenabledSslCipherSuites
    jdbcCompliantTruncationThis mode is enabled by default. This mode configures the connector to addĀ STRICT_TRANS_TABLESĀ toĀ sql_mode, which causes ES to handle truncation issues as errors instead of warnings.booltrue
    passwordDefines the password of the user account to connect with.
    socketTimeoutDefines the network socket timeout (SO_TIMEOUT) in milliseconds. When set toĀ 0, there is no socket timeout. This connection parameter is not intended to set a maximum time for statements. To set a maximum time for statements, please see theĀ max_statement_time.int0OPT_READ_TIMEOUT
    tcpRcvBufThe buffer size for TCP/IP and socket communication.Ā tcpSndBufĀ changes the same buffer value, and the biggest value of the two is selected.int0x4000tcpSndBuf
    tcpSndBufThe buffer size for TCP/IP and socket communication.Ā tcpRcvBufĀ changes the same buffer value, and the biggest value of the two is selected.int0x4000tcpRcvBuf
    tlsCertPath to the X509 certificate file.stringsslCert
    tlsCRLPath to a PEM file that should contain one or more revoked X509 certificates.stringtlsCrl
    useCompressionCompresses network traffic between the client and server.boolfalseCLIENT_COMPRESS
    userDefines the user name of the user account to connect with.userName
    useServerPrepStmtsDefines whether the connector uses server-side prepared statements using theĀ PREPARE Statement,Ā EXECUTE statement, andĀ DEALLOCATE / DROP PREPARE statementsĀ statements. By default, the connector uses client-side prepared statements.boolfalse
    useTlsWhether to force TLS. This enables TLS with the default system settings.booluseSsl
    +

    Connection Methods

    +

Two categories of methods are available to establish a connection.

    +

    sql::Driver::connect()

    +

    MariaDB Connector/C++ can connect using the non-staticĀ connect()Ā methods in theĀ sql::DriverĀ class.

    +

    The non-staticĀ connect()Ā methods in theĀ sql::DriverĀ class have the following prototypes:

    +
      +
    • Connection*Ā connect(constĀ SQLString&Ā url,Ā Properties&Ā props);
    • +
    • Connection*Ā connect(constĀ SQLString&Ā host,Ā constĀ SQLString&Ā user,Ā constĀ SQLString&Ā pwd);
    • +
    • Connection*Ā connect(constĀ Properties&Ā props);
    • +
    +

    The non-staticĀ connect()Ā methods in theĀ sql::DriverĀ class:

    +
      +
    • Require an instance of theĀ sql::DriverĀ class to establish a connection.
    • +
    • ReturnĀ nullptrĀ as theĀ Connection*Ā value when an error occurs, so applications should check the return value before use.
    • +
    +

    For example:

    +
    // Instantiate Driver
    +sql::Driver* driver = sql::mariadb::get_driver_instance();
    +
    +// Configure Connection, including an optional initial database name "places":
    +sql::SQLString url("jdbc:mariadb://example.skysql.com:5009/places");
    +
    +// Use a properties map for the other connection options
    +sql::Properties properties({
    +      {"user", "db_user"},
    +      {"password", "db_user_password"},
    +      {"autocommit", false},
    +      {"useTls", true},
    +      {"tlsCert", "classpath:static/skysql_chain.pem"},
    +   });
    +
    +// Establish Connection
    +// Use a smart pointer for extra safety
    +std::unique_ptr<sql::Connection> conn(driver->connect(url, properties));
    +
    +if (!conn) {
    +   cerr << "Invalid database connection" << endl;
    +   exit (EXIT_FAILURE);
    +}
    +
    +

    sql::DriverManager::getConnection()

    +

    MariaDB Connector/C++ can connect using the staticĀ getConnection()Ā methods in theĀ sql::DriverManagerĀ class.

    +

    The staticĀ getConnection()Ā methods in theĀ sql::DriverManagerĀ class have the following prototypes:

    +
      +
    • staticĀ Connection*Ā getConnection(constĀ SQLString&Ā url);
    • +
    • staticĀ Connection*Ā getConnection(constĀ SQLString&Ā url,Ā Properties&Ā props);
    • +
    • staticĀ Connection*Ā getConnection(constĀ SQLString&Ā url,Ā constĀ SQLString&Ā user,Ā constĀ SQLString&Ā pwd);
    • +
    +

    The staticĀ getConnection()Ā methods in theĀ sql::DriverManagerĀ class:

    +
      +
    • Do not require an instance of theĀ sql::DriverManagerĀ class to establish a connection, because they are static.
    • +
    • Throw an exception when an error occurs, so applications should useĀ tryĀ {Ā ..Ā }Ā catchĀ (Ā ..Ā )Ā {Ā ..Ā }Ā to catch the exception.
    • +
    +

    For example:

    +
    try {
    +    // Configure Connection, including an optional initial database name "places":
    +    sql::SQLString url("jdbc:mariadb://example.skysql.com:5009/places");
    +
    +    // Use a properties map for the other connection options
    +    sql::Properties properties({
    +          {"user", "db_user"},
    +          {"password", "db_user_password"},
    +          {"autocommit", false},
    +          {"useTls", true},
    +          {"tlsCert", "classpath:static/skysql_chain.pem"},
    +       });
    +
    +    // Establish Connection
    +    // Use a smart pointer for extra safety
    +    std::unique_ptr<sql::Connection> conn(DriverManager::getConnection(url, properties));
    + } catch (...) {
    +    cerr << "Invalid database connection" << endl;
    +    exit (EXIT_FAILURE);
    +}
    +
    +

    Code Example: Connect

    +

    The following code demonstrates how to connect using theĀ example database and user account:

    +
    // Includes
    +#include <iostream>
    +#include <mariadb/conncpp.hpp>
    +
    +// Main Process
    +int main(int argc, char **argv)
    +{
    +   try {
    +      // Instantiate Driver
    +      sql::Driver* driver = sql::mariadb::get_driver_instance();
    +
    +      // Configure Connection, including initial database name "test":
    +      sql::SQLString url("jdbc:mariadb://example.skysql.com:5009/test");
    +
    +      // Use a properties map for the other connection options
    +      sql::Properties properties({
    +            {"user", "db_user"},
    +            {"password", "db_user_password"},
    +            {"autocommit", false},
    +            {"useTls", true},
    +            {"tlsCert", "classpath:static/skysql_chain.pem"},
    +         });
    +
    +      // Establish Connection
    +      // Use a smart pointer for extra safety
    +      std::unique_ptr<sql::Connection> conn(driver->connect(url, properties));
    +
    +      // Use Connection
    +      // ...
    +
    +      // Close Connection
    +      conn->close();
    +   }
    +
    +   // Catch Exceptions
    +   catch (sql::SQLException& e) {
    +      std::cerr << "Error Connecting to the database: "
    +         << e.what() << std::endl;
    +
    +      // Exit (Failed)
    +      return 1;
    +   }
    +
    +   // Exit (Success)
    +   return 0;
    +}
    +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git "a/Connecting to Sky DBs/Connect from \342\200\230C\342\200\231 App/index.html" "b/Connecting to Sky DBs/Connect from \342\200\230C\342\200\231 App/index.html" new file mode 100644 index 00000000..f4847149 --- /dev/null +++ "b/Connecting to Sky DBs/Connect from \342\200\230C\342\200\231 App/index.html" @@ -0,0 +1,2666 @@ + + + + + + + + + + + + + + + + + + + + + + + Connect from ā€˜Cā€™ App - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Connect from ā€˜Cā€™ App

    +

    MariaDB Connector/C enables C and C++ applications to establish client connections to SkySQL over TLS. MariaDB Connector/C is a native connector that is written in C.

    +

    First Install MariaDB Connector/C

    +

    MariaDB Connector/C enables C and C++ applications to establish client connections to SkySQL and MariaDB database products over TLS.

    +

    Additional information on MariaDB Connector/C is available in theĀ MariaDB Knowledge Base.

    +

    Connection Info

    +

    The connection is configured via the information that is initially acquired from the SkySQL Portal pages:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    FunctionOption/ArgumentWhere to find it
mysql_real_connect()host argumentThe Fully Qualified Domain Name from the "Connect" window in the SkySQL portal
    mysql_real_connect()userĀ argumentThe desired username, which might be the default username in the Service Credentials view
    mysql_real_connect()passwdĀ argumentThe user's password, which might be the default password in the Service Credentials view if it was not yet customized
    mysql_real_connect()portĀ argumentThe Read-Write Port or Read-Only Port from the "Connect" window in SkySQL portal
    +

    Code Example

    +

    The following code demonstrates how to use MariaDB Connector/C to connect to SkySQL. This example uses theĀ example database and user account:

    +
    #include <stdio.h>
    +#include <stdlib.h>
    +#include <mysql.h>
    +
    +int main (int argc, char* argv[])
    +{
    +
    +   // Initialize Connection
    +   MYSQL *conn;
    +   if (!(conn = mysql_init(0)))
    +   {
    +      fprintf(stderr, "unable to initialize connection struct\n");
    +      exit(1);
    +   }
    +
    +   // Connect to the database
    +   if (!mysql_real_connect(
    +         conn,                 // Connection
    +         "example.skysql.com", // Host
    +         "db_user",            // User account
    +         "db_user_password",   // User password
    +         "test",               // Default database
+         3306,                 // Port number
    +         NULL,                 // Path to socket file
    +         0                     // Additional options
    +      ))
    +   {
    +      // Report the failed-connection error & close the handle
    +      fprintf(stderr, "Error connecting to Server: %s\n", mysql_error(conn));
    +      mysql_close(conn);
    +      exit(1);
    +   }
    +
    +   // Use the Connection
    +   // ...
    +
    +   // Close the Connection
    +   mysql_close(conn);
    +
    +   return 0;
    +}
    +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Connecting to Sky DBs/Connect using Connector R2DBC/index.html b/Connecting to Sky DBs/Connect using Connector R2DBC/index.html new file mode 100644 index 00000000..be5fcc6b --- /dev/null +++ b/Connecting to Sky DBs/Connect using Connector R2DBC/index.html @@ -0,0 +1,2535 @@ + + + + + + + + + + + + + + + + + + + + + + + Connect using Connector/R2DBC - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Connect using Connector/R2DBC

    +

    Java developers can use MariaDB Connector/R2DBC to connect to SkySQL using the Reactive Relational Database Connectivity (R2DBC) API. R2DBC operations are non-blocking, which makes the R2DBC API more scalable than Java's standard JDBC API. MariaDB Connector/R2DBC is available both with a native R2DBC implementation and the Spring Data R2DBC framework.

    +
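As a minimal sketch (not an official example), the snippet below opens a connection with the native R2DBC API and runs a trivial query. The host, port, credentials, and database name are placeholders taken from the other connector pages, and it assumes the org.mariadb:r2dbc-mariadb driver and Project Reactor are on the classpath:

import io.r2dbc.spi.Connection;
import io.r2dbc.spi.ConnectionFactories;
import io.r2dbc.spi.ConnectionFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public class SkySqlR2dbcExample {
    public static void main(String[] args) {
        // R2DBC URL assembled from SkySQL connection parameters (placeholder values)
        ConnectionFactory factory = ConnectionFactories.get(
                "r2dbc:mariadb://DB00000001:USER_PASSWORD@SKYSQL_SERVICE.mdb0000001.db.skysql.com:5009/test");

        Mono<Connection> connection = Mono.from(factory.create());

        Flux.usingWhen(
                connection,                                              // acquire the connection
                conn -> Flux.from(conn.createStatement("SELECT 1").execute())
                            .flatMap(result -> result.map((row, meta) -> row.get(0))),
                Connection::close)                                       // release it when done
            .doOnNext(value -> System.out.println("Result: " + value))
            .blockLast();                                                // block only for this demo
    }
}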

    Visit MariaDB R2DBC Connector page.

    +

    Resources

    + + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Connecting to Sky DBs/Connect using MariaDB CLI/index.html b/Connecting to Sky DBs/Connect using MariaDB CLI/index.html new file mode 100644 index 00000000..c75987af --- /dev/null +++ b/Connecting to Sky DBs/Connect using MariaDB CLI/index.html @@ -0,0 +1,2897 @@ + + + + + + + + + + + + + + + + + + + + + + + Connect using MariaDB CLI - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Connect using MariaDB CLI

    + +

MariaDB Client is available for Linux and Windows.

    +

    1. Installation

    +

    Installation of MariaDB Client varies by operating system.

    +

    CentOS / RHEL

    +
      +
    1. +

      Configure YUM package repositories:

      +
      sudo yum install wget
      +wget https://downloads.mariadb.com/MariaDB/mariadb_repo_setup
      +echo "30d2a05509d1c129dd7dd8430507e6a7729a4854ea10c9dcf6be88964f3fdc25 mariadb_repo_setup" \
      +    | sha256sum -c -
      +
      +chmod +x mariadb_repo_setup
      +
      +sudo ./mariadb_repo_setup --mariadb-server-version="mariadb-10.11"
      +
      +
    2. +
    3. +

      Install MariaDB Client and package dependencies:

      +
      sudo yum install MariaDB-client
      +
      +
    4. +
    +

    Debian / Ubuntu

    +
      +
    1. +

      Configure APT package repositories:

      +
      sudo apt install wget
      +
      +wget https://downloads.mariadb.com/MariaDB/mariadb_repo_setup
      +
      +echo "30d2a05509d1c129dd7dd8430507e6a7729a4854ea10c9dcf6be88964f3fdc25 mariadb_repo_setup" \
      +    | sha256sum -c -
      +
      +chmod +x mariadb_repo_setup
      +
      +sudo ./mariadb_repo_setup --mariadb-server-version="mariadb-10.11"
      +
      +sudo apt update
      +
      +
    2. +
    3. +

      Install MariaDB Client and package dependencies:

      +
      sudo apt install mariadb-client
      +
      +
    4. +
    +

    SLES

    +
      +
    1. +

      Configure ZYpp package repositories:

      +
      sudo zypper install wget
      +
      +wget https://downloads.mariadb.com/MariaDB/mariadb_repo_setup
      +
      +echo "30d2a05509d1c129dd7dd8430507e6a7729a4854ea10c9dcf6be88964f3fdc25 mariadb_repo_setup" \
      +    | sha256sum -c -
      +
      +chmod +x mariadb_repo_setup
      +
      +sudo ./mariadb_repo_setup --mariadb-server-version="mariadb-10.6"
      +
      +
    2. +
    3. +

      Install MariaDB Client and package dependencies: +

      sudo zypper install MariaDB-client
      +

      +
    4. +
    +

    Windows

    +
      +
    1. +

      Access MariaDB Downloads for MariaDB Community Server.

      +
    2. +
    3. +

      In the "Version" dropdown, select the version you want to download.

      +
    4. +
    5. +

      In the "OS" dropdown, select "MS Windows (64-bit)".

      +
    6. +
    7. +

      Click the "Download" button to download the MSI package.

      +
    8. +
    9. +

      When the MSI package finishes downloading, run it.

      +
    10. +
    11. +

      On the first screen, click "Next" to start the Setup Wizard.

      +
    12. +
    13. +

      On the second screen, click the license agreement checkbox, and then click "Next".

      +
    14. +
    15. +

      On the third screen, select the components you want to install. If you only want the standard MariaDB Client tools:

      +
        +
      • Deselect "Database instance".
      • +
      • Deselect "Backup utilities".
      • +
      • Deselect "Development Components".
      • +
      • Deselect "Third party tools".
      • +
      +

      When only "Client programs" is selected, click "Next".

      +
    16. +
    17. +

      On the next screen, click "Install".

      +
    18. +
    19. +

      When the installation process completes, click "Finish".

      +
    20. +
    +

    2. Connect

    +

    Linux

    +
      +
    1. +

      Determine the connection parameters for your SkySQL service.

      +
    2. +
    3. +

      Use your connection parameters in the following command line:

      +
      mariadb --host dbpwf03798702.sysp0000.db1.skysql.com --port 3306 \
      +    --user dbpwf03798702 -p --ssl-verify-server-cert
      +
      +
        +
      • +

        Replace 'dbpwf03798702.sysp0000.db1.skysql.com' with the Fully Qualified Domain Name of your service.

        +
      • +
      • +

        You can use 3307 for the port if running with Replicas. This is the read-only port of your service.

        +
      • +
      • +

        Replace the user name with the one for your service.

        +
      • +
      +
    4. +
    5. +

      After the command is executed, you will be prompted for the password of your database user account. Enter the default password for your default user, the password you set for the default user, or the password for the database user you created.

      +
    6. +
    +

    Windows

    +
      +
    1. +

      Fix your executable search path.

      +
    2. +
    3. +

      On Windows, MariaDB Client is not typically found in the executable search path by default. You must find its installation path, and add that path to the executable search path:

      +
      SET "PATH=C:\Program Files\MariaDB 10.6\bin;%PATH%"
      +
      +
    4. +
    5. +

      Use your connection parameters in the following command line:

      +
      mariadb --host dbpwf03798702.sysp0000.db1.skysql.com --port 3306 \
      +    --user dbpwf03798702 -p --ssl-verify-server-cert
      +
      +
        +
      • +

        Replace 'dbpwf03798702.sysp0000.db1.skysql.com' with the Fully Qualified Domain Name of your service.

        +
      • +
      • +

        You can use 3307 for the port if running with Replicas. This is the read-only port of your service.

        +
      • +
      • +

        Replace the user name with the one for your service.

        +
      • +
      +
    6. +
    7. +

      After the command is executed, you will be prompted for the password of your database user account. Enter the default password for your default user, the password you set for the default user, or the password for the database user you created.

      +
    8. +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Connecting to Sky DBs/Connect using ODBC/index.html b/Connecting to Sky DBs/Connect using ODBC/index.html new file mode 100644 index 00000000..3a1a50ea --- /dev/null +++ b/Connecting to Sky DBs/Connect using ODBC/index.html @@ -0,0 +1,3100 @@ + + + + + + + + + + + + + + + + + + + + + + + Connect using ODBC - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    Connect using ODBC

    +

    Overview

    +

    Application developers can use MariaDB Connector/ODBC to establish a data source for client connections with SkySQL.

    +

    The method for configuring the data source varies between operating systems.

    +

    Configuring a Data Source on Linux

    +
      +
    1. +

Configure unixODBC to recognize the driver by creating a file called MariaDB_odbc_driver_template.ini with the relevant driver definition.

      +

      For example, on CentOS / RHEL / Rocky Linux:

      +
      [MariaDB ODBC 3.1 Driver]
      +Description = MariaDB Connector/ODBC v.3.1
      +Driver      = /usr/lib64/libmaodbc.so
      +
      +

      On Debian / Ubuntu:

      +
      [MariaDB ODBC 3.1 Driver]
      +Description = MariaDB Connector/ODBC v.3.1
      +Driver      = /usr/lib/libmaodbc.so
      +
      +
    2. +
    3. +

      Install the driver using the odbcinst command.

      +

      For example:

      +
      sudo odbcinst -i -d -f MariaDB_odbc_driver_template.ini
      +
      +
    4. +
    5. +

      Determine the connection parameters for your database.

      +
    6. +
    7. +

Configure unixODBC to connect to the data source by creating a file called MariaDB_odbc_data_source_template.ini with the relevant data source parameters. Be sure to specify SSLVERIFY = 1 for your SkySQL database.

      +

      For example:

      +
      # Data Source for unixODBC
      +[My-Test-Server]
      +Description = Describe your database setup here
      +Driver      = MariaDB ODBC 3.1 Driver
      +Trace       = Yes
      +TraceFile   = /tmp/trace.log
      +SERVER      = localhost
      +SOCKET      = /var/run/mysqld/mysqld.sock
      +USER        = db_user
      +PASSWORD    = db_user_password
      +DATABASE    = test
      +
      +
      # Data Source for unixODBC
      +[My-Test-Server]
      +Description = Describe your database setup here
      +Driver      = MariaDB ODBC 3.1 Driver
      +Trace       = Yes
      +TraceFile   = /tmp/trace.log
      +SERVER      = example.skysql.com
      +PORT        = 3306
      +SSLVERIFY   = 1
      +USER        = db_user
      +PASSWORD    = db_user_password
      +DATABASE    = test
      +
      +
        +
      • Customize the values of the parameters with the relevant information for your environment.
      • +
      • If you have SSL certificate files, you can add the following parameters to your data source file:
      • +
      +
      SSLCA = /path/to/ca-cert.pem
      +SSLKEY = /path/to/client-key.pem
      +SSL_CERT = /path/to/client-cert.pem
      +
      +
    8. +
    9. +

      Install the unixODBC data source template file:

      +
      $ sudo odbcinst -i -s -h -f MariaDB_odbc_data_source_template.ini
      +
      +
    10. +
    11. +

Test the data source My-Test-Server configured in the MariaDB_odbc_data_source_template.ini file using the isql command. If you see the output below, you have successfully connected to your SkySQL database.

      +
      $ isql -v My-Test-Server
      ++-------------------------+
      +| Connected!              |
      +| sql-statement           |
      +| help[tablename]         |
      +| quit                    |
      ++-------------------------+
      +SQL>
      +
      +
    12. +
    13. +

      To select your new data source in your application, select the data source with the name that you configured, which is My-Test-Server in the above example.

      +
    14. +
    +

    Configuring a Data Source on macOS

    +
      +
    1. +

Confirm that MariaDB Connector/ODBC has been registered with iODBC by checking that the following options are set in the iODBC configuration file at /Library/ODBC/odbcinst.ini:

      +
      [ODBC]
      +Trace     = no
      +TraceFile = /tmp/iodbc_trace.log
      +
      +[ODBC Drivers]
      +MariaDB ODBC 3.1 Unicode Driver = Installed
      +
      +[MariaDB ODBC 3.1 Unicode Driver]
      +Driver      = /Library/MariaDB/MariaDB-Connector-ODBC/libmaodbc.dylib
      +Description = MariaDB Connector/ODBC(Unicode) 3.1 64bit
      +Threading   = 0
      +
      +
    2. +
    3. +

      Determine the connection parameters for your database.

      +
    4. +
    5. +

      Add a data source for your database to iODBC by adding the following options to the iODBC configuration file at /Library/ODBC/odbc.ini:

      +
      [ODBC Data Sources]
      +My-Test-Server = MariaDB ODBC 3.1 Unicode Driver
      +
      +[My-Test-Server]
      +Driver   = /Library/MariaDB/MariaDB-Connector-ODBC/libmaodbc.dylib
      +SERVER   = 192.0.2.1
      +DATABASE = test
      +USER     = db_user
      +PASSWORD = db_user_password
      +
      +
        +
• Substitute the values of the SERVER, SOCKET, DATABASE, PORT, USER, and PASSWORD parameters with the relevant values for your environment.
      • +
• Test the data source using the iodbctest command:
      • +
      +
      iodbctest "DSN=My-Test-Server"
      +
      +
    6. +
    7. +

      To select your new data source in your application, select the data source with the name that you configured, which is My-Test-Server in the above example.

      +
    8. +
    +

    Configuring a Data Source on Windows

    +

    MariaDB Connector/ODBC requires at least Windows 8.

    +

    Windows 10 was used to prepare these instructions. When using other versions of Windows, these instructions may require adjustment.

    +
      +
    1. In the start menu, search for "ODBC Data Sources".
    2. +
    3. In the search results, open the application called "ODBC Data Sources (32-bit)" or "ODBC Data Sources (64-bit)", depending on whether you need a data source for a 32-bit or 64-bit application.
    4. +
    5. In the ODBC Data Source Administrator, click the "Add" button on the right side.
    6. +
    7. In the "Create New Data Source" window:
        +
      • Click on "MariaDB ODBC 3.1 Driver" in the list.
      • +
      • Click the "Finish" button.
      • +
      +
    8. +
    9. In the "Create a new Data Source to MariaDB" window:
        +
      • In the "Name" text box, enter a name for the data source.
      • +
      • In the "Description" test box, enter a description for the data source.
      • +
      • Click the "Next" button.
      • +
      +
    10. +
    11. In the next window, provide the connection credentials:
        +
      • In the "Server Name" field, provide the IP address or domain name for the Server.
      • +
      • In the "User name" field, provide the username for the database user account.
      • +
      • In the "Password" field, provide the password for that user.
      • +
      • In the "Database" field, provide the the default database to use.
      • +
      • Then, click the "Next" button.
      • +
      +
    12. +
    +

    wodbc2

    +
      +
    1. Continue configuring the data source using the wizard:
        +
      • The wizard provides a series of windows for configuring various aspects of the connection. Enable settings you want to use.
      • +
      • Click the "Next" button to move onto the next window in the wizard.
      • +
      • In the "TLS Settings" window, make sure that "Verify Certificate" is checked. You can also add your certificate information here.
      • +
      +
    2. +
    +

    wodbc1

    +
      +
    1. Click the "Finish" on the last window to exit the wizard and save your data source.
        +
      • To test your connection, double-click the data source you have created to open the configuration window again. Click "Next" to reach the window titled "How do you want to connect to MariaDB" and click the button labeled "Test DSN". If you see the message below, you have successfully connected.
      • +
      +
    2. +
    +

    wodbc3

    +
      +
    1. To select your new data source in your application, select the data source with the name that you configured for the "Name" field.
    2. +
    +

    Failover

    +

    MariaDB Connector/ODBC supports failover in case one or more hosts are not available.

    +

    The failover feature requires using MariaDB Connector/ODBC 3.1.16 or greater with MariaDB Connector/C 3.3 or greater.

    +

MariaDB Connector/ODBC 3.1.16 and greater is statically linked for Windows and macOS with MariaDB Connector/C 3.3.1. MariaDB Connector/ODBC 3.1.16 and greater is dynamically linked for Linux with MariaDB Connector/C.

    +

The failover feature is enabled by providing a comma-separated list of hosts as the server name.

    +

    The failover host string is the SERVER string. If the SERVER string does not include a port, the default port will be used.

    +

    The following syntax is required:

    +
      +
    • IPv6 addresses must be enclosed within square brackets "[]"
    • +
    • hostname and port must be separated by a colon ":"
    • +
• hostname:port pairs must be separated by a comma ","
    • +
    • If only one hostname:port is specified, the host string must end with a comma
    • +
    • If no port is specified, the default port will be used
    • +
    +

    An example of a failover host string:

    +
    [::1]:3306,192.168.0.1:3307,test.example.com
    +
    +

    Connection Parameters

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Connection ParameterDescriptionDefault Value
DRIVER• On Linux, the name of the driver, which is configured in the unixODBC driver template file. On macOS, the path to the driver's shared library, which is installed at /Library/MariaDB/MariaDB-Connector-ODBC/libmaodbc.dylib by default.
    SERVERHost name, IPv4 address, or IPv6 address of the database server.localhost
    SOCKETThe path to the socket file. On Linux, MariaDB Enterprise Server uses different default socket files on different Linux distributions. On Debian / Ubuntu, the default socket file is /var/run/mysqld/mysqld.sock or /run/mysqld/mysqld.sock. On CentOS / RHEL / Rocky Linux, the default socket file is /var/lib/mysql/mysql.sock./tmp/mysql.sock
    DATABASEDatabase name to select upon successful connection. The database must already exist, and the user account must have privileges to select it.
    PORTTCP port of the database server.3306
    USERThe username to use for authentication.
    PASSWORDUser password.
    FORWARDONLYWhen enabled, cursors are created as SQL_CURSOR_FORWARD_ONLY, so they can only move forward. Starting in Connector/ODBC 3.2, cursors are SQL_CURSOR_FORWARD_ONLY by default. In previous releases, cursors are created as SQL_CURSOR_STATIC by default.
    NO_CACHEWhen enabled, result set streaming is enabled, which enables the application to fetch result sets from the server row-by-row instead of caching the entire result set on the client side. Since the application is not caching the entire result set, the application is less likely to run out of memory when working with large result sets.
    STREAMRSAlias for the NO_CACHE connection parameter.
    OPTIONSSee OPTIONS Bitmask
    PREPONCLIENTWhen enabled, the SQLPrepare ODBC API function uses the text protocol and client-side prepared statements (CSPS).
ATTRSets connection attributes that can be queried via the Performance Schema session_connect_attrs Table when the Performance Schema is enabled. Specify attributes in the format ATTR={<attrname1>=<attrvalue1>[,<attrname2>=<attrvalue2>,...]}
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    WhatWhere to find it
DRIVER• On Linux, the name of the driver, which is configured in the unixODBC driver template file. On macOS, the path to the driver's shared library, which is installed at /Library/MariaDB/MariaDB-Connector-ODBC/libmaodbc.dylib by default.
SERVERFully Qualified Domain Name in the Connection Parameters Portal
PORTRead-Write Port or Read-Only Port in the Connection Parameters Portal
    USERDefault username in the Service Credentials view, or the username you created
    PASSWORDDefault password in the Service Credentials view, the password you set on the default user, or the password for the user you created
    SSLVERIFYSet to 1 to connect with SSL
    FORCETLSSet to 1 to enable TLS
    FORWARDONLYWhen enabled, cursors are created as SQL_CURSOR_FORWARD_ONLY, so they can only move forward. Starting in Connector/ODBC 3.2, cursors are SQL_CURSOR_FORWARD_ONLY by default. In previous releases, cursors are created as SQL_CURSOR_STATIC by default.
    NO_CACHEWhen enabled, result set streaming is enabled, which enables the application to fetch result sets from the server row-by-row instead of caching the entire result set on the client side. Since the application is not caching the entire result set, the application is less likely to run out of memory when working with large result sets.
    STREAMRSAlias for the NO_CACHE connection parameter.
    OPTIONSSee OPTIONS Bitmask
    PREPONCLIENTWhen enabled, the SQLPrepare ODBC API function uses the text protocol and client-side prepared statements (CSPS).
ATTRSets connection attributes that can be queried via the Performance Schema session_connect_attrs Table when the Performance Schema is enabled. Specify attributes in the format ATTR={<attrname1>=<attrvalue1>[,<attrname2>=<attrvalue2>,...]}
    +
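Applications that connect through the ODBC SQLDriverConnect function can also supply these parameters as a DSN-less connection string instead of configuring a data source. A sketch with placeholder values, using the parameter names from the tables above:

DRIVER={MariaDB ODBC 3.1 Driver};SERVER=example.skysql.com;PORT=3306;DATABASE=test;USER=db_user;PASSWORD=db_user_password;SSLVERIFY=1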

    OPTIONS Bitmask

    +

The OPTIONS bitmask contains the following bits:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Bit NumberBit ValueDescription
    01Unused
    12Tells connector to return the number of matched rows instead of number of changed rows
    416Same as NO_PROMPT connection parameter
    532Forces all cursors to be dynamic
    664Forbids the DATABASE_NAME.TABLE_NAME.COLUMN_NAME syntax
    112048Enables compression in the protocol
    138192Same as the NAMEDPIPE connection parameter
    1665536Same as the USE_MYCNF connection parameter
    201048576Same as the NO_CACHE connection parameter
    212097152Same as the FORWARDONLY connection parameter
    224194304Same as the AUTO_RECONNECT connection parameter
    2667108864Enables multi-statement queries
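To combine several options, add their bit values together. For example, to create forward-only cursors (bit value 2097152) and enable multi-statement queries (bit value 67108864), set OPTIONS to 2097152 + 67108864 = 69206016.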
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Connecting to Sky DBs/connect_window.png b/Connecting to Sky DBs/connect_window.png new file mode 100644 index 00000000..1703bb12 Binary files /dev/null and b/Connecting to Sky DBs/connect_window.png differ diff --git a/Connecting to Sky DBs/index.html b/Connecting to Sky DBs/index.html new file mode 100644 index 00000000..fe7c3b00 --- /dev/null +++ b/Connecting to Sky DBs/index.html @@ -0,0 +1,2654 @@ + + + + + + + + + + + + + + + + + + + + + + + Connecting to Sky DBs - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    Connecting to Sky DBs

    +

    This page describes how to connect to a SkySQL database using a MariaDB-compatible client.

    +

    Important - Whitelist your IP address first

    +
    +

    Note

    +

💡 Access to all services is protected by a firewall by default. You need to whitelist your client's IP address (your desktop, laptop, or server). Just select 'Manage -> Security Access' and then click 'Add my current IP' to add the IP of your current workstation (laptop or desktop).

    +
    +
    +

    Note

    +

💡 If you are not sure of, or unable to obtain, the IP address, you can use 0.0.0.0/0 to effectively disable the firewall. It goes without saying: don't do this for your production DBs.

    +
    +

For more details, go to the Firewall settings page.

    +

    Connecting using the MariaDB Client CLI

    +

Once your DB service is launched, click the 'Connect' option for your service on the dashboard. This pops up all the required attributes to connect from any SQL client.

    +

    Connection parameters include:

    +
      +
    • Default username
    • +
    • Default password
    • +
    • Hostname (Fully Qualified Domain Name)
    • +
    • TCP port (3306 or 3307)
    • +
    • ssl-verify-server-cert (if SSL is ON)
    • +
    +
    +

    Note

    +

💡 Unlike previous SkySQL versions, the current version no longer requires clients to supply the server SSL certificate for SSL connections. Customers who migrated from MariaDB Corporation to SkySQL Inc. can continue to use the provided certificates (when using the previous SkySQL method for connecting), but we strongly recommend moving to the connection properties as shown in the Connect window for your service.

    +
    +
    +

    Note

    +

💡 There is a default configuration change in the 11.4.2 MariaDB client that requires SSL verification. If needed, this can be disabled by setting --ssl-verify-server-cert=0.

    +
    +

    Connect window example

    +

    Install and Connect using the MariaDB client

    +

    After installing the MariaDB client according to your operating system, simply copy/paste the MariaDB CLI command as displayed in the Connect window.

    +
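The copied command will look roughly like this example (the hostname and username are placeholders; use the values shown in your own Connect window):

mariadb --host dbpwf03798702.sysp0000.db1.skysql.com --port 3306 \
    --user dbpwf03798702 -p --ssl-verify-server-cert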

    Connecting from your Application

    +

    Applications can connect to SkySQL using any of the below MariaDB supported connectors. There are several other connectors from the community too.

    + +
    +

    Note

    +

💡 For Enterprise Server With Replica(s), you can also use any MongoDB client via the NoSQL Interface.

    +
    +

    Connecting from SQL tools

    +

    Clients listed here have been tested to properly connect with SkySQL and execute queries.

    +

    Most of the SQL clients and editors natively support MariaDB. Most often you can also just select 'MySQL' and connect to your SkySQL DB service.

    +
      +
    • Connecting using Java clients like Squirrel SQL
        +
• All you need to do is make sure the "useSsl" property is set to 'true' if SSL is ON (see the example JDBC URL after this list).
      • +
      +
    • +
    • TablePlus
        +
      • If SSL was configured, you should set the SSL Mode option to 'ENFORCE' and not 'VERIFY-SERVER-CERT'.
      • +
      • When using the "ENFORCE" SSL mode in TablePlus or any MySQL client, the client will still verify that the SSL certificate presented by the server is valid and trusted. This includes verifying that the certificate is issued by a trusted Certificate Authority (CA) and that it has not expired or been revoked.
      • +
      • In the "ENFORCE" mode, the client requires the server to present a valid SSL certificate during the SSL handshake process. - The client will then verify the following aspects of the certificate:
      • +
      • Certificate Chain: The client will check if the server's SSL certificate is part of a valid certificate chain, leading back to a trusted root CA certificate.
      • +
      • Certificate Expiry: The client will verify that the server's SSL certificate has not expired.
      • +
      • Certificate Revocation: The client may also check if the certificate has been revoked by the issuing CA.
      • +
      • If any of these checks fail, the client will not establish the SSL connection and may display an error indicating that the certificate is not valid or trusted.
      • +
      +
    • +
    • MariaDB CLI
    • +
    • DBGate
        +
      • When using SSL, you only have to switch to the SSL Tab in the Connection window and select 'use SSL' and click Connect.
      • +
      +
    • +
    • Sequel Ace - Connect to MariaDB from MacOS
        +
      • In the connection window, you should select 'Require SSL' if your SkySQL database has SSL turned ON (the default).
      • +
      +
    • +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Connecting to Sky DBs/wodbc1.png b/Connecting to Sky DBs/wodbc1.png new file mode 100644 index 00000000..9f8e252a Binary files /dev/null and b/Connecting to Sky DBs/wodbc1.png differ diff --git a/Connecting to Sky DBs/wodbc2.png b/Connecting to Sky DBs/wodbc2.png new file mode 100644 index 00000000..535216c6 Binary files /dev/null and b/Connecting to Sky DBs/wodbc2.png differ diff --git a/Connecting to Sky DBs/wodbc3.png b/Connecting to Sky DBs/wodbc3.png new file mode 100644 index 00000000..a61ad8f4 Binary files /dev/null and b/Connecting to Sky DBs/wodbc3.png differ diff --git a/Data loading, Migration/Import-CSV-data/index.html b/Data loading, Migration/Import-CSV-data/index.html new file mode 100644 index 00000000..4cba0329 --- /dev/null +++ b/Data loading, Migration/Import-CSV-data/index.html @@ -0,0 +1,2739 @@ + + + + + + + + + + + + + + + + + + + + + + + Import CSV data - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    + +
    +
    + + + +
    +
    + + + + +

    Import CSV data

    +

    SkySQL customers can import data into a SkySQL service using theĀ LOADĀ DATAĀ LOCALĀ INFILEĀ SQL statement:

    +
      +
    • TheĀ LOADĀ DATAĀ LOCALĀ INFILEĀ statement can import data from TSV and CSV files
    • +
    • TheĀ LOADĀ DATAĀ LOCALĀ INFILEĀ statement can be executed by any client or connector
    • +
    +
    +

    Note

    +

Make sure your schema is already created in the database. If you need to import entire databases or create tables, you should use mariadb-import.

    +
    +
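For instance, the accounts.contacts table used in the import examples below could be created ahead of time with statements like the following (the column definitions here are hypothetical; use your actual schema):

CREATE DATABASE IF NOT EXISTS accounts;

CREATE TABLE IF NOT EXISTS accounts.contacts (
   id INT UNSIGNED NOT NULL PRIMARY KEY,
   first_name VARCHAR(50),
   last_name VARCHAR(50),
   email VARCHAR(100)
);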

    Enable Local Infiles

    +

    Support for local infiles must be enabled on the client side and on the SkySQL service.

    +

    Enable Local Infiles on the Client or Connector

    +

    To execute theĀ LOADĀ DATAĀ LOCALĀ INFILEĀ statement, most clients and connectors require a specific option to be enabled.

    +

If you are using the mariadb client, the --local-infile option must be specified.

    +

    Enable Local Infiles in SkySQL

    +

    Support for local infiles must be enabled on the SkySQL service.

    +

    For SkySQL services that use MariaDB Enterprise Server, theĀ local_infileĀ system variableĀ must be enabled:

    +
      +
    • For Replicated Transactions and Single Node Transactions services, theĀ local_infileĀ system variableĀ isĀ OFFĀ by default
    • +
    +

    Configuration ManagerĀ can be used to modify the value of theĀ local_infileĀ system variable.

    +

    Import Data

    +
      +
    1. Determine theĀ connection parametersĀ for your SkySQL service.
    2. +
3. Connect with the mariadb client and specify the --local-infile option, which is needed by the next step:
    4. +
    +
    mariadb --host FULLY_QUALIFIED_DOMAIN_NAME --port TCP_PORT \
    +      --user DATABASE_USER --password \
    +      --ssl-verify-server-cert \
    +      --default-character-set=utf8 \
    +      --local-infile
    +
    +

    After the command is executed, you will be prompted for a password. Enter the default password for your default user, the password you set for the default user, or the password for the database user you created.

    +

    For each table that you want to import, execute theĀ LOADĀ DATAĀ LOCALĀ INFILEĀ statement to import the data from the TSV or CSV file into your SkySQL database service.

    +

    For a TSV file:

    +
    LOAD DATA LOCAL INFILE 'contacts.tsv'
    +INTO TABLE accounts.contacts;
    +
    +

    For a CSV file:

    +
    LOAD DATA LOCAL INFILE 'contacts.csv'
    +INTO TABLE accounts.contacts
    +FIELDS TERMINATED BY ',';
    +
    +

    Using a Connector

    +

    If you are using a MariaDB Connector, then you must select the method for the specific connector from the list below.

    +

    If you are using MariaDB Connector/C, theĀ MYSQL_OPT_LOCAL_INFILEĀ option can be set with theĀ mysql_optionsv()Ā function:

    +
    /* enable local infile */
    +unsigned int enable_local_infile = 1;
    +mysql_optionsv(mysql, MYSQL_OPT_LOCAL_INFILE, (void *) &enable_local_infile);
    +
    +

    If you are using MariaDB Connector/J, theĀ allowLocalInfileĀ parameter can be set for the connection:

    +
    Connection connection = DriverManager.getConnection("jdbc:mariadb://FULLY_QUALIFIED_DOMAIN_NAME:TCP_PORT/test?user=DATABASE_USER&password=DATABASE_PASSWORD&allowLocalInfile=true");
    +
    +

    If you are using MariaDB Connector/Node.js, theĀ permitLocalInfileĀ parameter can be set for the connection:

    +
    mariadb.createConnection({
    +   host: 'FULLY_QUALIFIED_DOMAIN_NAME',
    +   port: 'TCP_PORT',
    +   user:'DATABASE_USER',
    +   password: 'DATABASE_PASSWORD',
    +   permitLocalInfile: 'true'
    + });
    +
    +

    If you are using MariaDB Connector/Python, theĀ local_infileĀ parameter can be set for the connection:

    +
    conn = mariadb.connect(
    +   user="DATABASE_USER",
    +   password="DATABASE_PASSWORD",
    +   host="FULLY_QUALIFIED_DOMAIN_NAME",
    +   port=TCP_PORT,
+   local_infile=True)
    +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data loading, Migration/Install Mariadb-dump/index.html b/Data loading, Migration/Install Mariadb-dump/index.html new file mode 100644 index 00000000..061c79fa --- /dev/null +++ b/Data loading, Migration/Install Mariadb-dump/index.html @@ -0,0 +1,2673 @@ + + + + + + + + + + + + + + + + + + + + + + + Install Mariadb-dump - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Install Mariadb-dump

    +

    SkySQL customers can manually create a backup of a SkySQL service using theĀ mariadb-dumpĀ utility:

    +
      +
    • TheĀ mariadb-dumpĀ utility provides a command-line interface (CLI)
    • +
    • TheĀ mariadb-dumpĀ utility is available for Linux and Windows
    • +
    • TheĀ mariadb-dumpĀ utility supportsĀ many command-line options
    • +
    • Egress charges may apply for customer-initiated backups
    • +
    +

    For details about restoring a backup created with theĀ mariadb-dumpĀ utility, see "Restore a Manual Backup".

    +

    Installation

    +

    Installation of MariaDB Dump varies by operating system.

    +

    CentOS / RHEL

    +
      +
    1. +

      Configure YUM package repositories:

      +
      sudo yum install wget
      +wget https://downloads.mariadb.com/MariaDB/mariadb_repo_setup
      +echo "935944a2ab2b2a48a47f68711b43ad2d698c97f1c3a7d074b34058060c2ad21b mariadb_repo_setup" \
      +    | sha256sum -c -
      +chmod +x mariadb_repo_setup
      +sudo ./mariadb_repo_setup --mariadb-server-version="mariadb-10.6"
      +
      +
    2. +
    3. +

      Install MariaDB Dump and package dependencies:

      +
      sudo yum install MariaDB-client
      +
      +
    4. +
    +

    Debian / Ubuntu

    +
      +
    1. +

      Configure APT package repositories:

      +
      sudo apt install wget
      +wget https://downloads.mariadb.com/MariaDB/mariadb_repo_setup
+echo "935944a2ab2b2a48a47f68711b43ad2d698c97f1c3a7d074b34058060c2ad21b mariadb_repo_setup" \
      +    | sha256sum -c -
      +chmod +x mariadb_repo_setup
+sudo ./mariadb_repo_setup --mariadb-server-version="mariadb-10.6"
+sudo apt update
      +
      +
    2. +
    3. +

      Install MariaDB Dump and package dependencies:

      +
      sudo apt install mariadb-client
      +
      +
    4. +
    +

    SLES

    +
      +
    1. +

      Configure ZYpp package repositories:

      +
      sudo zypper install wget
      +wget https://downloads.mariadb.com/MariaDB/mariadb_repo_setup
      +echo "935944a2ab2b2a48a47f68711b43ad2d698c97f1c3a7d074b34058060c2ad21b mariadb_repo_setup" \
      +    | sha256sum -c -
      +chmod +x mariadb_repo_setup
      +sudo ./mariadb_repo_setup --mariadb-server-version="mariadb-10.6"
      +
      +
    2. +
    3. +

      Install MariaDB Dump and package dependencies:

      +
      sudo zypper install MariaDB-client
      +
      +
    4. +
    +

    Windows

    +
      +
    1. AccessĀ MariaDB DownloadsĀ for MariaDB Community Server.
    2. +
    3. In the "Version" dropdown, select the version you want to download.
    4. +
    5. In the "OS" dropdown, select "MS Windows (64-bit)".
    6. +
    7. Click the "Download" button to download the MSI package.
    8. +
    9. When the MSI package finishes downloading, run it.
    10. +
    11. On the first screen, click "Next" to start the Setup Wizard.
    12. +
    13. On the second screen, click the license agreement checkbox, and then click "Next".
    14. +
    15. On the third screen, select the components you want to install. If you only want the standard MariaDB Client tools:
        +
      • Deselect "Database instance".
      • +
      • Deselect "Backup utilities".
      • +
      • Deselect "Development Components".
      • +
      • Deselect "Third party tools".
      • +
      • When only "Client programs" is selected, click "Next".
      • +
      +
    16. +
    17. On the next screen, click "Install".
    18. +
    19. When the installation process completes, click "Finish".
    20. +
    +

    Create a logical ā€œDumpā€ SQL file

    +

    The procedure to create a backup depends on the operating system.

    +

    If you plan to restore the backup to a SkySQL service, theĀ mysqlĀ database should be excluded from the backup by specifyingĀ --ignore-database=mysql, because SkySQL user accounts do not have sufficient privileges to restore that database.

    +

    Linux

    +
      +
    1. Determine theĀ connection parametersĀ for your SkySQL service.
    2. +
    3. Use your connection parameters in the following command line:
    4. +
    +
    mariadb-dump --host FULLY_QUALIFIED_DOMAIN_NAME --port TCP_PORT \
    +      --user DATABASE_USER --password \
    +      --ssl-verify-server-cert \
    +      --all-databases \
    +      --ignore-database=mysql \
    +      --single-transaction \
    +      --events \
    +      --routines \
    +      --default-character-set=utf8mb4 \
    +      > skysql_dump.sql
    +
    +
      +
    • ReplaceĀ FULLY_QUALIFIED_DOMAIN_NAMEĀ with the Fully Qualified Domain Name of your service.
    • +
    • ReplaceĀ TCP_PORTĀ with the read-write or read-only port of your service.
    • +
    • ReplaceĀ DATABASE_USERĀ with the default username for your service, or the username you created.
    • +
    +

    After the command is executed, you will be prompted for a password. Enter the default password for your default user, the password you set for the default user, or the password for the database user you created.

    +

    Windows

    +
      +
    1. +

      Fix your executable search path.

      +

      On Windows, MariaDB Dump is not typically found in the executable search path by default. You must find its installation path, and add that path to the executable search path:

      +
      SET "PATH=C:\Program Files\MariaDB 10.6\bin;%PATH%"
      +
      +
    2. +
    3. +

      Determine theĀ connection parametersĀ for your SkySQL service.

      +
    4. +
    5. Use your connection parameters in the following command line:
    6. +
    +
    mariadb-dump --host FULLY_QUALIFIED_DOMAIN_NAME --port TCP_PORT \
    +      --user DATABASE_USER --password \
    +      --ssl-verify-server-cert \
    +      --all-databases \
    +      --ignore-database=mysql \
    +      --single-transaction \
    +      --events \
    +      --routines \
    +      --default-character-set=utf8mb4 \
    +      > skysql_dump.sql
    +
    +
      +
    • ReplaceĀ FULLY_QUALIFIED_DOMAIN_NAMEĀ with the Fully Qualified Domain Name of your service.
    • +
    • ReplaceĀ TCP_PORTĀ with the read-write or read-only port of your service.
    • +
    • ReplaceĀ DATABASE_USERĀ with the default username for your service, or the username you created.
    • +
    +

    After the command is executed, you will be prompted for a password. Enter the default password for your default user, the password you set for the default user, or the password for the database user you created.

    +

    MariaDB Dump 10.3 and Older

    +

    The instructions provided above are written for MariaDB Dump 10.4 and later, which uses the binary filename ofĀ mariadb-dump.

    +

    For MariaDB Dump 10.3 and older, the binary filename wasĀ mysqldump. The instructions can be adapted for MariaDB Dump 10.3 and older by executingĀ mysqldumpĀ rather thanĀ mariadb-dump.

    +
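
    For example, the Linux command shown above might be adapted as follows (a sketch, assuming your older client supports the same options; the connection placeholders are the same as above):

    +
    mysqldump --host FULLY_QUALIFIED_DOMAIN_NAME --port TCP_PORT \
    +      --user DATABASE_USER --password \
    +      --ssl-verify-server-cert \
    +      --all-databases \
    +      --ignore-database=mysql \
    +      --single-transaction \
    +      --events \
    +      --routines \
    +      --default-character-set=utf8mb4 \
    +      > skysql_dump.sql
    +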

    Temporal Tables

    +

    For system-versioned tables and transaction-precise tables, MariaDB Dump only backs up current row versions. It does not back up historical row versions.
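
    If you also need the historical rows of a system-versioned table, one option is to export them separately with a query that uses FOR SYSTEM_TIME ALL. A minimal sketch using the mariadb client (the accounts.contacts table and output file name are placeholders):

    +
    mariadb --host FULLY_QUALIFIED_DOMAIN_NAME --port TCP_PORT \
    +      --user DATABASE_USER --password \
    +      --ssl-verify-server-cert \
    +      --batch --skip-column-names \
    +      --execute='SELECT * FROM accounts.contacts FOR SYSTEM_TIME ALL;' \
    +      > contacts_history.tsv
    +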

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data loading, Migration/Install-mariadb-import/index.html b/Data loading, Migration/Install-mariadb-import/index.html new file mode 100644 index 00000000..e87d9085 --- /dev/null +++ b/Data loading, Migration/Install-mariadb-import/index.html @@ -0,0 +1,2676 @@ + + + + + + + + + + + + + + + + + + + + + + + Install mariadb-import - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Install mariadb-import

    +

    SkySQL customers can import data into a SkySQL service using theĀ mariadb-importĀ utility:

    +
      +
    • TheĀ mariadb-importĀ utility provides a command-line interface (CLI)
    • +
    • TheĀ mariadb-importĀ utility can import data from TSV and CSV files
    • +
    • TheĀ mariadb-importĀ utility is available for Linux and Windows
    • +
    • TheĀ mariadb-importĀ utility supportsĀ many command-line options
    • +
    +

    Installation

    +

    Installation of MariaDB Import varies by operating system.

    +

    CentOS / RHEL

    +
      +
    1. +

      Configure YUM package repositories:

      +
      sudo yum install wget  
      +wget https://downloads.mariadb.com/MariaDB/mariadb_repo_setup
      +echo "935944a2ab2b2a48a47f68711b43ad2d698c97f1c3a7d074b34058060c2ad21b mariadb_repo_setup" \
      +    | sha256sum -c -
      +chmod +x mariadb_repo_setup   
      +sudo ./mariadb_repo_setup --mariadb-server-version="mariadb-10.6"
      +
      +
    2. +
    3. +

      Install MariaDB Import and package dependencies:

      +
      sudo yum install MariaDB-client
      +
      +
    4. +
    +

    Debian / Ubuntu

    +
      +
    1. +

      Configure APT package repositories:

      +
      sudo apt install wget
      +wget https://downloads.mariadb.com/MariaDB/mariadb_repo_setup
      +echo "935944a2ab2b2a48a47f68711b43ad2d698c97f1c3a7d074b34058060c2ad21b mariadb_repo_setup" \
      +    | sha256sum -c -
      +chmod +x mariadb_repo_setup
      +sudo ./mariadb_repo_setup --mariadb-server-version="mariadb-10.6"
      +sudo apt update
      +
      +
    2. +
    3. +

      Install MariaDB Import and package dependencies:

      +
      sudo apt install mariadb-client
      +
      +
    4. +
    +

    SLES

    +
      +
    1. +

      Configure ZYpp package repositories:

      +
      sudo zypper install wget
      +wget https://downloads.mariadb.com/MariaDB/mariadb_repo_setup
      +echo "935944a2ab2b2a48a47f68711b43ad2d698c97f1c3a7d074b34058060c2ad21b mariadb_repo_setup" \
      +    | sha256sum -c -
      +chmod +x mariadb_repo_setup
      +sudo ./mariadb_repo_setup --mariadb-server-version="mariadb-10.6"
      +
      +
    2. +
    3. +

      Install MariaDB Import and package dependencies:

      +
      sudo zypper install MariaDB-client
      +
      +
    4. +
    +

    Windows

    +
      +
    1. AccessĀ MariaDB DownloadsĀ for MariaDB Community Server.
    2. +
    3. In the "Version" dropdown, select the version you want to download.
    4. +
    5. In the "OS" dropdown, select "MS Windows (64-bit)".
    6. +
    7. Click the "Download" button to download the MSI package.
    8. +
    9. When the MSI package finishes downloading, run it.
    10. +
    11. On the first screen, click "Next" to start the Setup Wizard.
    12. +
    13. On the second screen, click the license agreement checkbox, and then click "Next".
    14. +
    15. On the third screen, select the components you want to install. If you only want the standard MariaDB Client tools:
        +
      • Deselect "Database instance".
      • +
      • Deselect "Backup utilities".
      • +
      • Deselect "Development Components".
      • +
      • Deselect "Third party tools".
      • +
      • When only "Client programs" is selected, click "Next".
      • +
      +
    16. +
    17. On the next screen, click "Install".
    18. +
    19. When the installation process completes, click "Finish".
    20. +
    +

    Import Data

    +

    The procedure to import data depends on the operating system.

    +

    Linux

    +
      +
    1. Determine theĀ connection parametersĀ for your SkySQL service.
    2. +
    3. +

      Use MariaDB Import with the connection information to import the data from the TSV or CSV file into your SkySQL database service:

      +
      mariadb-import --host FULLY_QUALIFIED_DOMAIN_NAME --port TCP_PORT \
      +      --user DATABASE_USER --password \
      +      --ssl-verify-server-cert \
      +      --ssl-ca ~/PATH_TO_PEM_FILE \
      +      --local \
      +      --ignore-lines=1 \
      +      accounts contacts.tsv
      +
      +
        +
      • ReplaceĀ FULLY_QUALIFIED_DOMAIN_NAMEĀ with the Fully Qualified Domain Name of your service
      • +
      • ReplaceĀ TCP_PORTĀ with the read-write or read-only port of your service
      • +
      • ReplaceĀ DATABASE_USERĀ with the default username for your service, or the username you created
      • +
      • ReplaceĀ ~/PATH_TO_PEM_FILEĀ with the path to the certificate authority chain (.pem) file
      • +
      • If your file is a CSV file, rather than a TSV file, specify --fields-terminated-by=,
      • +
      • Specify the database name as the first argument (from above, accounts)
      • +
      • The table name is extracted from the TSV or CSV file's basename (from above, contacts); a sketch of preparing a matching table and file follows this list
      • +
      • After the command is executed, you will be prompted for a password. Enter the default password for your default user, the password you set for the default user, or the password for the database user you created.
      • +
      +
    4. +
    +
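
    Note that mariadb-import loads rows into an existing table whose name matches the file's basename; it does not create the table for you. A minimal sketch of preparing a matching table and TSV file (the accounts database, contacts table, and column definitions are placeholders):

    +
    # Create the target table before importing (placeholder columns)
    +mariadb --host FULLY_QUALIFIED_DOMAIN_NAME --port TCP_PORT \
    +      --user DATABASE_USER --password --ssl-verify-server-cert \
    +      --execute='CREATE TABLE IF NOT EXISTS accounts.contacts
    +                 (id INT PRIMARY KEY, name VARCHAR(255), email VARCHAR(255));'
    +
    +# contacts.tsv: a header row (skipped by --ignore-lines=1) plus tab-separated values
    +printf 'id\tname\temail\n1\tAlice\talice@example.com\n' > contacts.tsv
    +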

    Windows

    +
      +
    1. +

      Fix your executable search path.

      +

      On Windows, MariaDB Import is not typically found in the executable search path by default. You must find its installation path, and add that path to the executable search path:

      +
      SET "PATH=C:\Program Files\MariaDB 10.6\bin;%PATH%"
      +
      +
    2. +
    3. +

      Determine theĀ connection parametersĀ for your SkySQL service.

      +
    4. +
    5. +

      Use MariaDB Import with the connection information to import the data from the TSV or CSV file into your SkySQL database service:

      +
      mariadb-import --host FULLY_QUALIFIED_DOMAIN_NAME --port TCP_PORT \
      +      --user DATABASE_USER --password \
      +      --ssl-verify-server-cert \
      +      --ssl-ca ~/PATH_TO_PEM_FILE \
      +      --local \
      +      --ignore-lines=1 \
      +      accounts contacts.tsv
      +
      +
        +
      • ReplaceĀ FULLY_QUALIFIED_DOMAIN_NAMEĀ with the Fully Qualified Domain Name of your service
      • +
      • ReplaceĀ TCP_PORTĀ with the read-write or read-only port of your service
      • +
      • ReplaceĀ DATABASE_USERĀ with the default username for your service, or the username you created
      • +
      • ReplaceĀ ~/PATH_TO_PEM_FILEĀ with the path to the certificate authority chain (.pem) file
      • +
      • If your file is a CSV file, rather than a TSV file, specify --fields-terminated-by=,
      • +
      • Specify the database name as the first argument (from above,Ā accounts)
      • +
      • The table name is extracted from the TSV or CSV file's basename (from above,Ā contacts)
      • +
      • After the command is executed, you will be prompted for a password. Enter the default password for your default user, the password you set for the default user, or the password for the database user you created.
      • +
      +
    6. +
    +

    MariaDB Import 10.3 and Older

    +

    The instructions provided above are written for MariaDB Import 10.4 and later, which uses the binary filename ofĀ mariadb-import.

    +

    For MariaDB Import 10.3 and older, the binary filename wasĀ mysqlimport. The instructions can be adapted for MariaDB Import 10.3 and older by executingĀ mysqlimportĀ rather thanĀ mariadb-import.

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data loading, Migration/Migrate-your-database-to-SkySQL/index.html b/Data loading, Migration/Migrate-your-database-to-SkySQL/index.html new file mode 100644 index 00000000..9cc5ca79 --- /dev/null +++ b/Data loading, Migration/Migrate-your-database-to-SkySQL/index.html @@ -0,0 +1,2594 @@ + + + + + + + + + + + + + + + + + + + Migrate your database to SkySQL - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    Migrate your database to SkySQL

    +

    SkySQL provides a range of options to suit different migration scenarios.

    +
      +
    • Databases can be migrated to SkySQL from many different database platforms, including Oracle, MySQL, PostgreSQL, Microsoft SQL Server, IBM DB2, and more.
    • +
    • SkySQL supports migration from both on-premises and cloud-based infrastructure.
    • +
    + +

    Below are the most common scenarios for database migration to SkySQL.

    +
    +

    Prerequisites

    +
      +
    1. An active SkySQL account.
    2. +
    3. An existing source database with the IP added to your SkySQL allowlist.
    4. +
    +
    +Considerations +
    + +Ensure that your SkySQL service deployment configuration is compatible with that of your existing source database, including: +
      +
    • Deployment region - Ensure that the SkySQL deployment region is the same as the source database region.
    • +
    • Topology - Enterprise Server Single node or with Replica(s)
    • +
    • Server version - Ensure that the SkySQL server version is compatible with the source database version.
    • +
    • Instance size - Ensure that the SkySQL instance is compatible with the source database instance type and size
    • +
    • Storage - Ensure that the SkySQL storage type and size is compatible with the source database
    • +
      +
    +
    +

    SkyDBA Assisted Migration

    +
      +
    • Existing customers can submit aĀ support caseĀ to request assistance with a migration.
    • +
    • New customers canĀ contact usĀ to begin the migration planning process.
    • +
    +

    Our SkyDBA team can help design a migration plan to suit your needs.

    +
    +SkyDBA Assisted Migration Approach +
    + We use a multi-step process to assist customers with migrations: +
      +
    • Assessment of application requirements, inventory, and identified challenges
    • +
    • Schema Migration including tables, constraints, indexes, and views
    • +
    • Application Code Migration by porting and testing SQL and application code
    • +
    • Data Migration and Replication with import of data, with conversion to the new schema, and ongoing inbound replication of new data
    • +
    • Quality Assurance to assess data validity, data integrity, performance, accuracy of query results, stored code, and running code such as client applications, APIs, and batch jobs
    • +
    • Cutover including final database preparation, fallback planning, switchover, and decommissioning of old databases
    • +
      +
    +
    +

    Self-Service Migration to SkySQL

    +

    SkySQL provides two different options for self-service migration:

    +

    Option 1: Migrate using the SkySQL REST API

    +

    SkySQL Managed Migration is a REST-based service that handles the migration process, including data migration, schema migration, and user migration. It provides step-by-step guidance to set up live replication of your database to SkySQL, along with insights to monitor the migration process.

    + +

    Option 2: Custom Migration

    +

    For most migrations, whether small, mid-size, or large, SkySQL Managed Migration is the quickest and safest option. However, for migrations with specific requirements, or when your team needs more flexibility and control over the migration process, you can design a custom migration plan using the steps suggested below.

    + + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data loading, Migration/Migrating Using a Logical Dump and Replication/index.html b/Data loading, Migration/Migrating Using a Logical Dump and Replication/index.html new file mode 100644 index 00000000..ac169b15 --- /dev/null +++ b/Data loading, Migration/Migrating Using a Logical Dump and Replication/index.html @@ -0,0 +1,2642 @@ + + + + + + + + + + + + + + + + + + + + + + + Migrating Using a Logical Dump and Replication - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Migrating Using a Logical Dump and Replication

    +

    To minimize downtime during migration, you can set up live replication from your source database to the SkySQL database.

    +

    Prerequisites

    +
      +
    1. An active SkySQL account. Identify requirements for your SkySQL implementation prior to deployment, including:
    2. +
    3. Topology - Enterprise Server Single node or with Replica(s)
    4. +
    5. Instance size
    6. +
    7. Storage requirements
    8. +
    9. Desired server version
    10. +
    11. An existing source database with the IP added to your SkySQL allowlist.
    12. +
    +

    Steps

    +
      +
    1. +

      Dump the Source Database: Take a dump of your source database using mysqldump or mariadb-dump. Include triggers, procedures, views, and schedules in the dump, and ignore the system databases to avoid conflicts with the existing SkySQL schemas.

      +
      mysqldump --single-transaction --master-data=2 --all-databases \
      +    --routines --triggers --events \
      +    --ignore-database=mysql --ignore-database=information_schema \
      +    --ignore-database=performance_schema --ignore-database=sys \
      +    > dump.sql
      +
      +
    2. +
    3. +

      Create the Users and Grants Separately: To avoid conflicts with the existing SkySQL users, use SELECT CONCAT on your source database to create users and grants in separate files. Note that you may need to create the schema and table grants separately as well.

      +
      mysql -u [username] -p -h [hostname] --silent --skip-column-names -e "SELECT CONCAT('CREATE USER \'', user, '\'@\'', host, '\' IDENTIFIED BY PASSWORD \'', authentication_string, '\';') FROM mysql.user;" > users.sql
      +
      +mysql -h [hostname] -u [username] -p --silent --skip-column-names -e "SELECT CONCAT('GRANT ', privilege_type, ' ON ', table_schema, '.* TO \'', grantee, '\';') FROM information_schema.schema_privileges;" > grants.sql
      +
      +mysql -h [hostname] -u [username] -p --silent --skip-column-names -e "SELECT CONCAT('GRANT ', privilege_type, ' ON ', table_schema, '.', table_name, ' TO \'', grantee, '\';') FROM information_schema.table_privileges;" >> grants.sql
      +
      +
    4. +
    5. +

      Import the Dumps into SkySQL: Import the logical dumps (SQL files) into your SkySQL database, ensuring to load the user and grant dumps after the main dump.

      +
      mariadb -u [SkySQL username] -p -h [SkySQL hostname] --port 3306 --ssl-verify-server-cert < dump.sql
      +mariadb -u [SkySQL username] -p -h [SkySQL hostname] --port 3306 --ssl-verify-server-cert < users.sql
      +mariadb -u [SkySQL username] -p -h [SkySQL hostname] --port 3306 --ssl-verify-server-cert < grants.sql
      +
      +
    6. +
    +

    If you encounter an error while importing your users, you may need to uninstall the simple_password_check plugin on your SkySQL instance.

    +
    UNINSTALL PLUGIN simple_password_check;
    +
    +
      +
    1. +

      Start Replication: Turn on replication using the SkySQL stored procedures that configure and start replication (see our documentation for details). The dump.sql file you created in step 1 contains the binary log and GTID coordinates needed for the change_external_primary procedure; a quick way to locate them is sketched after this list.

      +

      CALL sky.change_external_primary(
      +    host VARCHAR(255),
      +    port INT,
      +    logfile TEXT,
      +    logpos LONG,
      +    use_ssl_encryption BOOLEAN
      +);
      +
      +CALL sky.replication_grants();
      +CALL sky.start_replication();

      +
    2. +
    +
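
    As a rough way to locate those coordinates in the dump file (a sketch; the exact comment text can vary by dump tool and version):

    +
    grep -m1 -i "CHANGE MASTER" dump.sql
    +grep -m1 -i "gtid" dump.sql
    +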

    Performance Optimization During Migration

    +
      +
    • +

      Disable Foreign Key Checks: Temporarily disable foreign key checks during import to speed up the process.

      +
      SET foreign_key_checks = 0;
      +
      +
    • +
    • +

      Disable Binary Logging: If binary logging is not required during the import process, and you are using a standalone instance, it can potentially be disabled to improve performance. SkyDBA Services can assist with this as part of a detailed migration plan.

      +
    • +
    +

    Data Integrity and Validation

    +
      +
    • +

      Consistency Checks: Perform consistency checks on the source database before migration by connecting with a supported SQL client and running the following for each table.

      +
      CHECK TABLE [table_name] FOR UPGRADE;
      +
      +
    • +
    • +

      Post-Import Validation: Validate the data integrity and consistency after the import.

      +
      CHECKSUM TABLE [table_name];
      +
      +
    • +
    +

    Advanced Migration Techniques

    +
      +
    • +

      Adjust Buffer Sizes: Temporarily increase buffer sizes to optimize the import performance. This can be done via the Configuration Manager in the portal.

      +
      innodb_buffer_pool_size = 2G
      +innodb_log_file_size = 512M
      +
      +
    • +
    • +

      Parallel Dump and Import: Use tools that support parallel processing for dumping and importing data.

      +
      mysqlpump -u [username] -p --default-parallelism=4 --add-drop-database \
      +    --databases [database_name] > dump.sql
      +
      +
    • +
    • +

      Incremental Backups: For large datasets, incremental backups can be used to minimize the amount of data to be transferred. SkyDBA Services can assist you with setting these up as part of a custom migration plan.

      +
    • +
    +

    Monitoring and Logging

    +
      +
    • +

      Enable Detailed Logging: Enable detailed logging while testing the migration process to monitor and troubleshoot effectively. The slow_log can be enabled in the SkySQL configuration manager.

      +
    • +
    • +

      Resource Monitoring: Use monitoring tools to track resource usage (CPU, memory, I/O) during the migration to ensure system stability. See our monitoring documentation for details.

      +
    • +
    +

    Additional Resources

    + + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data loading, Migration/Replicating data from external DB/index.html b/Data loading, Migration/Replicating data from external DB/index.html new file mode 100644 index 00000000..1cf13c88 --- /dev/null +++ b/Data loading, Migration/Replicating data from external DB/index.html @@ -0,0 +1,2688 @@ + + + + + + + + + + + + + + + + + + + + + + + Replicating data from an external DB - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Replicating data from an external DB

    +

    From a MySQL Database

    +

    SkySQL customers can configure inbound replication from MySQL 5.7 to a compatible MariaDB running in SkySQL.

    +

    For additional information about the stored procedures used to configure replication with Replicated Transactions services, see "SkySQL Replication Helper Procedures for Replicated Transactions".

    + + +

    1. Obtain Binary Log File and Position

    +

    On the external primary server, obtain the binary log file and position from which to start replication.

    +

    When you want to start replication from the most recent transaction, the current binary log file position can be obtained by executing theĀ SHOWĀ MASTERĀ STATUSĀ statement:

    +
    SHOW MASTER STATUS;
    +
    ++------------------+----------+--------------+------------------+-------------------+
    +| File             | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
    ++------------------+----------+--------------+------------------+-------------------+
    +| mysql-bin.000001 |      154 |              |                  |                   |
    ++------------------+----------+--------------+------------------+-------------------+
    +
    +

    2. Configure Binary Log File and Position

    +

    On the SkySQL service, configure the binary log file and position from which to start replication.

    +

    The binary log file and position can be configured using theĀ sky.change_external_primary()Ā stored procedure:

    +
    CALL sky.change_external_primary('mysql1.example.com', 3306, 'mysql-bin.000001', 154, false);
    +
    ++--------------------------------------------------------------------------------------------------------------+
    +| Run_this_grant_on_your_external_primary                                                                      |
    ++--------------------------------------------------------------------------------------------------------------+
    +| GRANT REPLICATION SLAVE ON *.* TO 'skysql_replication'@'%' IDENTIFIED BY '<password_hash>';                  |
    ++--------------------------------------------------------------------------------------------------------------+
    +
    +

    This procedure will return the GRANT statement you must run on the source DB.

    +

    3. Grant Replication Privileges

    +

    On the external primary server, execute theĀ GRANTĀ statement returned by the last step:

    +
    GRANT REPLICATION SLAVE ON *.* TO 'skysql_replication'@'%' IDENTIFIED BY '<password_hash>';
    +
    +

    4. Start Replication

    +

    On the SkySQL service, start replication.

    +

    Replication can be started using theĀ sky.start_replication()Ā stored procedure:

    +
    CALL sky.start_replication();
    ++----------------------------------------+
    +| Message                                |
    ++----------------------------------------+
    +| External replication running normally. |
    ++----------------------------------------+
    +
    +

    5. Check Replication Status

    +

    On the SkySQL service, check replication status.

    +

    Replication status can be checked using theĀ sky.replication_status()Ā stored procedure:

    +
    CALL sky.replication_status()\G
    +
    +*************************** 1. row ***************************
    +                Slave_IO_State: Waiting for master to send event
    +                   Master_Host: mariadb1.example.com
    +                   Master_User: skysql_replication
    +                   Master_Port: 3306
    +                 Connect_Retry: 60
    +               Master_Log_File: mysql-bin.000001
    +           Read_Master_Log_Pos: 462
    +                Relay_Log_File: mariadb-relay-bin.000002
    +                 Relay_Log_Pos: 665
    +         Relay_Master_Log_File: mysql-bin.000001
    +              Slave_IO_Running: Yes
    +             Slave_SQL_Running: Yes
    +               Replicate_Do_DB:
    +           Replicate_Ignore_DB:
    +            Replicate_Do_Table:
    +        Replicate_Ignore_Table:
    +       Replicate_Wild_Do_Table:
    +   Replicate_Wild_Ignore_Table:
    +                    Last_Errno: 0
    +                    Last_Error:
    +                  Skip_Counter: 0
    +           Exec_Master_Log_Pos: 462
    +               Relay_Log_Space: 985
    +               Until_Condition: None
    +                Until_Log_File:
    +                 Until_Log_Pos: 0
    +            Master_SSL_Allowed: No
    +            Master_SSL_CA_File:
    +            Master_SSL_CA_Path:
    +               Master_SSL_Cert:
    +             Master_SSL_Cipher:
    +                Master_SSL_Key:
    +         Seconds_Behind_Master: 0
    + Master_SSL_Verify_Server_Cert: No
    +                 Last_IO_Errno: 0
    +                 Last_IO_Error:
    +                Last_SQL_Errno: 0
    +                Last_SQL_Error:
    +   Replicate_Ignore_Server_Ids:
    +              Master_Server_Id: 200
    +                Master_SSL_Crl:
    +            Master_SSL_Crlpath:
    +                    Using_Gtid: No
    +                   Gtid_IO_Pos:
    +       Replicate_Do_Domain_Ids:
    +   Replicate_Ignore_Domain_Ids:
    +                 Parallel_Mode: conservative
    +                     SQL_Delay: 0
    +           SQL_Remaining_Delay: NULL
    +       Slave_SQL_Running_State: Slave has read all relay log; waiting for more updates
    +              Slave_DDL_Groups: 0
    +Slave_Non_Transactional_Groups: 0
    +    Slave_Transactional_Groups: 0
    +
    +

    From a MariaDB Database

    +

    When replicating from another MariaDB database, you can use GTID based replication. The first two steps are different from MySQL.

    +

    1. Obtain GTID Position

    +

    On the external primary server, obtain the GTID position from which to start replication.

    +

    When you want to start replication from the most recent transaction, the current GTID position can be obtained by querying the value of theĀ gtid_current_posĀ system variable with theĀ SHOWĀ GLOBALĀ VARIABLESĀ statement:

    +
    SHOW GLOBAL VARIABLES LIKE 'gtid_current_pos';
    +
    ++------------------+---------+
    +| Variable_name    | Value   |
    ++------------------+---------+
    +| gtid_current_pos | 0-100-1 |
    ++------------------+---------+
    +
    +

    2. Configure GTID Position

    +

    On the SkySQL service, configure the GTID position from which to start replication.

    +

    The GTID position can be configured using theĀ sky.change_external_primary_gtid()Ā stored procedure:

    +
    CALL sky.change_external_primary_gtid('mariadb1.example.com', 3306, '0-100-1', false);
    +
    ++--------------------------------------------------------------------------------------------------------------+
    +| Run_this_grant_on_your_external_primary                                                                      |
    ++--------------------------------------------------------------------------------------------------------------+
    +| GRANT REPLICATION SLAVE ON *.* TO 'skysql_replication'@'%' IDENTIFIED BY '<password_hash>';                  |
    ++--------------------------------------------------------------------------------------------------------------+
    +
    +

    The stored procedure returns a GRANT statement that must be run on the external primary server; the remaining steps (granting replication privileges, starting replication, and checking replication status) are the same as for MySQL above.

    + + +

    Compatibility

    +

    To configure inbound replication from an external primary server using MariaDB Server to your Replicated Transactions service in SkySQL, the following requirements must be met:

    +
      +
    • The external primary server must use a supported version of MariaDB Server, and the external primary server must use a version in the same or older release series as the version used by the SkySQL service.
    • +
    • When the SkySQL service usesĀ ES 10.6, the following versions are supported for the external primary server:
        +
      • MariaDB Server 10.2
      • +
      • MariaDB Server 10.3
      • +
      • MariaDB Server 10.4
      • +
      • MariaDB Server 10.5
      • +
      • MariaDB Server 10.6
      • +
      +
    • +
    • When the SkySQL service usesĀ ES 10.5, the following versions are supported for the external primary server:
        +
      • MariaDB Server 10.2
      • +
      • MariaDB Server 10.3
      • +
      • MariaDB Server 10.4
      • +
      • MariaDB Server 10.5
      • +
      +
    • +
    • When the SkySQL service usesĀ ES 10.4, the following versions are supported for the external primary server:
        +
      • MariaDB Server 10.2
      • +
      • MariaDB Server 10.3
      • +
      • MariaDB Server 10.4
      • +
      +
    • +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data loading, Migration/SkySQL-custom-migration/index.html b/Data loading, Migration/SkySQL-custom-migration/index.html new file mode 100644 index 00000000..7dda7c39 --- /dev/null +++ b/Data loading, Migration/SkySQL-custom-migration/index.html @@ -0,0 +1,2652 @@ + + + + + + + + + + + + + + + + + + + SkySQL custom migration - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    //TODO Add a section on physical backup and restore

    +

    Live Replication for Minimal Downtime

    +

    To minimize downtime during migration, set up live binary log based replication from your source database to the SkySQL database. +Click here for a detailed walkthrough of the steps involved.

    +

    Follow these steps:

    +

    Replicating data from an External DB

    +
      +
    1. +

      Create the Users and Grants Separately: To avoid conflicts with the existing SkySQL users, use SELECT CONCAT on your source database to create users and grants in separate files. Note that you may need to create the schema and table grants separately as well.

      +
      mysql -u [username] -p -h [hostname] --silent --skip-column-names -e "SELECT CONCAT('CREATE USER \'', user, '\'@\'', host, '\' IDENTIFIED BY PASSWORD \'', authentication_string, '\';') FROM mysql.user;" > users.sql
      +
      +mysql -h [hostname] -u [username] -p --silent --skip-column-names -e "SELECT CONCAT('GRANT ', privilege_type, ' ON ', table_schema, '.* TO \'', grantee, '\';') FROM information_schema.schema_privileges;" > grants.sql
      +
      +mysql -h [hostname] -u [username] -p --silent --skip-column-names -e "SELECT CONCAT('GRANT ', privilege_type, ' ON ', table_schema, '.', table_name, ' TO \'', grantee, '\';') FROM information_schema.table_privileges;" >> grants.sql
      +
      +
    2. +
    3. +

      Import the Dumps into SkySQL: Import the logical dumps (SQL files) into your SkySQL database, ensuring to load the user and grant dumps after the main dump.

      +
      mariadb -u [SkySQL username] -p -h [SkySQL hostname] --port 3306 --ssl-verify-server-cert < dump.sql
      +mariadb -u [SkySQL username] -p -h [SkySQL hostname] --port 3306 --ssl-verify-server-cert < users.sql
      +mariadb -u [SkySQL username] -p -h [SkySQL hostname] --port 3306 --ssl-verify-server-cert < grants.sql
      +
      +
    4. +
    +

    If you encounter an error while importing your users, you may need to uninstall the simple_password_check plugin on your SkySQL instance.

    +
    UNINSTALL PLUGIN simple_password_check;
    +
    +
      +
    1. +

      Start Replication: Turn on replication using the SkySQL stored procedures that configure and start replication (see our documentation for details). The dump.sql file you created when dumping the source database contains the binary log and GTID information needed for the change_external_primary procedure.

      +

      CALL sky.change_external_primary(
      +    host VARCHAR(255),
      +    port INT,
      +    logfile TEXT,
      +    logpos LONG,
      +    use_ssl_encryption BOOLEAN
      +);
      +
      +CALL sky.replication_grants();
      +CALL sky.start_replication();

      +
    2. +
    +

    Performance Optimization During Migration

    +
      +
    • +

      Disable Foreign Key Checks: Temporarily disable foreign key checks during import to speed up the process.

      +
      SET foreign_key_checks = 0;
      +
      +
    • +
    • +

      Disable Binary Logging: If binary logging is not required during the import process, and you are using a standalone instance, it can potentially be disabled to improve performance. SkyDBA Services can assist with this as part of a detailed migration plan.

      +
    • +
    +

    Data Integrity and Validation

    +
      +
    • +

      Consistency Checks: Perform consistency checks on the source database before migration by connecting with a supported SQL client and running the following for each table.

      +
      CHECK TABLE [table_name] FOR UPGRADE;
      +
      +
    • +
    • +

      Post-Import Validation: Validate the data integrity and consistency after the import.

      +
      CHECKSUM TABLE [table_name];
      +
      +
    • +
    +

    Advanced Migration Techniques

    +
      +
    • +

      Adjust Buffer Sizes: Temporarily increase buffer sizes to optimize the import performance. This can be done via the Configuration Manager in the portal.

      +
      innodb_buffer_pool_size = 2G
      +innodb_log_file_size = 512M
      +
      +
    • +
    • +

      Parallel Dump and Import: Use tools that support parallel processing for dumping and importing data.

      +
      mysqlpump -u [username] -p --default-parallelism=4 --add-drop-database \
      +    --databases [database_name] > dump.sql
      +
      +
    • +
    • +

      Incremental Backups: For large datasets, incremental backups can be used to minimize the amount of data to be transferred. SkyDBA Services can assist you with setting these up as part of a custom migration plan.

      +
    • +
    +

    Monitoring and Logging

    +
      +
    • +

      Enable Detailed Logging: Enable detailed logging while testing the migration process to monitor and troubleshoot effectively. The slow_log can be enabled in the SkySQL configuration manager.

      +
    • +
    • +

      Resource Monitoring: Use monitoring tools to track resource usage (CPU, memory, I/O) during the migration to ensure system stability. See our monitoring documentation for details.

      +
    • +
    +

    Additional Resources

    + + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data loading, Migration/SkySQL-managed-migration/index.html b/Data loading, Migration/SkySQL-managed-migration/index.html new file mode 100644 index 00000000..791f4187 --- /dev/null +++ b/Data loading, Migration/SkySQL-managed-migration/index.html @@ -0,0 +1,2481 @@ + + + + + + + + + + + + + + + + + + + SkySQL managed migration - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    SkySQL managed migration

    + +

    //TODO
    +//Steps to follow
    +0. Prerequisites
    +1. Dump using a logical or physical backup, following the steps in the Backup and Restore "restore from your own bucket" section.
    +2. Upload the dump to an S3/GCS bucket under your control.
    +3. Call the migration API - ref - SkySQL Managed Migration Tutorial

    +
      +
    1. To minimize downtime during migration, set up live binary log based replication from your source database to the SkySQL database. +Click here for a detailed walkthrough of the steps involved.
    2. +
    +

    5. Add a section with migration insights

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data loading, Migration/index.html b/Data loading, Migration/index.html new file mode 100644 index 00000000..0f5511d2 --- /dev/null +++ b/Data loading, Migration/index.html @@ -0,0 +1,2523 @@ + + + + + + + + + + + + + + + + + + + + + + + Data loading, Migration - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    + +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data loading, Migration/migrate-rds-mysql-to-skysql-using-amazon-data-migration-service_whitepaper_1109.pdf b/Data loading, Migration/migrate-rds-mysql-to-skysql-using-amazon-data-migration-service_whitepaper_1109.pdf new file mode 100644 index 00000000..6741c2f5 Binary files /dev/null and b/Data loading, Migration/migrate-rds-mysql-to-skysql-using-amazon-data-migration-service_whitepaper_1109.pdf differ diff --git a/Data loading, Migration/nr-support-assisted/index.html b/Data loading, Migration/nr-support-assisted/index.html new file mode 100644 index 00000000..1c5c2ae0 --- /dev/null +++ b/Data loading, Migration/nr-support-assisted/index.html @@ -0,0 +1,2481 @@ + + + + + + + + + + + + + + + + + + + Support-Assisted Data Import - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Support-Assisted Data Import

    +

    SkySQL customers can receive assistance when importing data into a SkySQL service:

    +
      +
    • Many file formats are supported
    • +
    • Data of large size can be imported efficiently
    • +
    • Contact Support to request assistance with a data import
    • +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data offloading/Replicating data from SkySQL to external database/index.html b/Data offloading/Replicating data from SkySQL to external database/index.html new file mode 100644 index 00000000..0a037661 --- /dev/null +++ b/Data offloading/Replicating data from SkySQL to external database/index.html @@ -0,0 +1,2873 @@ + + + + + + + + + + + + + + + + + + + + + + + Replicating data from SkySQL to external database - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    Replicating data from SkySQL to external database

    +

    SkySQL customers can configure outbound replication from a Replicated Transactions service to a compatible MariaDB Server running elsewhere, whether in your own data center, as a self-managed MariaDB instance in the cloud, or even in another managed service such as AWS RDS.

    +

    SkySQL uses stored procedures to configure replication to other MariaDB or MySQL database servers.

    +

    For additional information about the stored procedures used to configure replication with Replicated Transactions services, see SkySQL Replication Helper Procedures for Replicated Transactions

    +

    Requirements

    +

    To configure outbound replication from your Replicated Transactions service in SkySQL to an external replica server using MariaDB Server, the following requirements must be met:

    +
      +
    • The external replica server must use a supported version of MariaDB Server, and the external replica server must use a version in the same or newer release series as the version used by the SkySQL service.
    • +
    • When the SkySQL service usesĀ ES 10.6, the following versions are supported for the external replica server:
        +
      • MariaDB Server 10.6
      • +
      +
    • +
    • When the SkySQL service usesĀ ES 10.5, the following versions are supported for the external replica server:
        +
      • MariaDB Server 10.5
      • +
      • MariaDB Server 10.6
      • +
      +
    • +
    • When the SkySQL service usesĀ ES 10.4, the following versions are supported for the external replica server:
        +
      • MariaDB Server 10.4
      • +
      • MariaDB Server 10.5
      • +
      • MariaDB Server 10.6
      • +
      +
    • +
    +

    Create User for Outbound Replication

    +

    With the default database admin user provided, create an external_replication user as seen below.

    +
    CREATE USER 'external_replication'@'%' IDENTIFIED BY 'bigs3cret';
    +GRANT REPLICATION SLAVE ON *.* TO 'external_replication'@'%';
    +
    +

    Check User Account

    +

    On the SkySQL service, confirm that the new user has sufficient privileges by executingĀ 

    +

    SHOW GRANTS FOR 'external_replication'@'%';
    +
    +
    +-------------+
    +| Grants for external_replication@%                                                                                                              |
    ++-------------+
    +| GRANT REPLICATION SLAVE, SLAVE MONITOR ON *.* TO `external_replication`@`%` IDENTIFIED BY PASSWORD '*CCD3A959D6A004B9C3807B728BC2E55B67E10518' |
    ++-------------+
    +

    +

    Add External Replica to Allowlist

    +

    On the SkySQL Customer Portal, add the IP address of the external replica server to the SkySQL service's allowlist. +- Click "Manage", then "Manage Allowlist" to add the IP address to the allowed list.

    + + +

    Obtain GTID Position

    +

    On the SkySQL service, obtain the GTID position from which to start replication.

    +

    When you want to start replication from the most recent transaction, the current GTID position can be obtained by querying the value of the 'gtid_current_pos' system variable:

    +
    SHOW GLOBAL VARIABLES
    +   LIKE 'gtid_current_pos';
    +
    +
    +------------------+-------------------+
    +| Variable_name    | Value             |
    ++------------------+-------------------+
    +| gtid_current_pos | 435700-435700-124 |
    ++------------------+-------------------+
    +
    +

    Configure GTID Position

    +

    On the external replica server, configure the GTID position from which to start replication.

    +

    The GTID position can be configured by setting theĀ 'gtid_slave_pos':

    +
    SET GLOBAL gtid_slave_pos='435700-435700-124';
    +
    +

    Configure Replication

    +

    On the external replica server, configure replication using theĀ connection parameters for your SkySQL service.

    +

    Replication can be configured using theĀ 'CHANGEĀ MASTERĀ TO' SQLĀ statement:

    +
    CHANGE MASTER TO
    +   MASTER_HOST='FULLY_QUALIFIED_DOMAIN_NAME',
    +   MASTER_PORT=TCP_PORT,
    +   MASTER_USER='external_replication',
    +   MASTER_PASSWORD='my_password',
    +   MASTER_SSL=1,
    +   MASTER_SSL_CA='~/PATH_TO_PEM_FILE',
    +   MASTER_USE_GTID=slave_pos;
    +
    +
      +
    • ReplaceĀ FULLY_QUALIFIED_DOMAIN_NAMEĀ with the Fully Qualified Domain Name of your service
    • +
    • ReplaceĀ TCP_PORTĀ with the read-write or read-only port of your service
    • +
    • ReplaceĀ ~/PATH_TO_PEM_FILEĀ with the path to the certificate authority chain (.pem) file
    • +
    +

    Start Replication

    +

    On the external replica server, start replication.

    +

    Replication can be started using theĀ 'STARTĀ REPLICA' SQLĀ statement:

    +
    START REPLICA;
    +
    +

    Finally, Check Replication Status

    +

    On the external replica server, check replication status.

    +

    Replication status can be checked using theĀ 'SHOWĀ REPLICAĀ STATUS' SQLĀ statement:

    +
    SHOW REPLICA STATUS \G
    +
    +*************************** 1. row ***************************
    +                Slave_IO_State: Waiting for master to send event
    +                   Master_Host: my-service.mdb0002147.db.skysql.net
    +                   Master_User: external_replication
    +                   Master_Port: 5003
    +                 Connect_Retry: 60
    +               Master_Log_File: mariadb-bin.000001
    +           Read_Master_Log_Pos: 558
    +                Relay_Log_File: mariadb-relay-bin.000002
    +                 Relay_Log_Pos: 674
    +         Relay_Master_Log_File: mariadb-bin.000001
    +              Slave_IO_Running: Yes
    +             Slave_SQL_Running: Yes
    +               Replicate_Do_DB:
    +           Replicate_Ignore_DB:
    +            Replicate_Do_Table:
    +        Replicate_Ignore_Table:
    +       Replicate_Wild_Do_Table:
    +   Replicate_Wild_Ignore_Table:
    +                    Last_Errno: 0
    +                    Last_Error:
    +                  Skip_Counter: 0
    +           Exec_Master_Log_Pos: 558
    +               Relay_Log_Space: 985
    +               Until_Condition: None
    +                Until_Log_File:
    +                 Until_Log_Pos: 0
    +            Master_SSL_Allowed: Yes
    +            Master_SSL_CA_File: /var/lib/mysql/skysql_chain.pem
    +            Master_SSL_CA_Path:
    +               Master_SSL_Cert:
    +             Master_SSL_Cipher:
    +                Master_SSL_Key:
    +         Seconds_Behind_Master: 0
    + Master_SSL_Verify_Server_Cert: No
    +                 Last_IO_Errno: 0
    +                 Last_IO_Error:
    +                Last_SQL_Errno: 0
    +                Last_SQL_Error:
    +   Replicate_Ignore_Server_Ids:
    +              Master_Server_Id: 435701
    +                Master_SSL_Crl: /var/lib/mysql/skysql_chain.pem
    +            Master_SSL_Crlpath:
    +                    Using_Gtid: Slave_Pos
    +                   Gtid_IO_Pos: 435700-435700-127
    +       Replicate_Do_Domain_Ids:
    +   Replicate_Ignore_Domain_Ids:
    +                 Parallel_Mode: optimistic
    +                     SQL_Delay: 0
    +           SQL_Remaining_Delay: NULL
    +       Slave_SQL_Running_State: Slave has read all relay log; waiting for more updates
    +              Slave_DDL_Groups: 0
    +Slave_Non_Transactional_Groups: 0
    +    Slave_Transactional_Groups: 0
    +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Data offloading/index.html b/Data offloading/index.html new file mode 100644 index 00000000..bbf4aa04 --- /dev/null +++ b/Data offloading/index.html @@ -0,0 +1,2844 @@ + + + + + + + + + + + + + + + + + + + + + + + Data offloading - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    Data offloading

    +

    There are multiple options to copy or offload data from a SkySQL DB. You can do a logical dump (i.e., output all data and DDL as SQL) to your local machine. Or, you can dump large data sets securely using the SkySQL Backup service to your own S3 or GCS bucket.

    +

    You can then use the offloaded data to recreate the DB elsewhere. You can also optionally set up "outbound replication" to keep the new DB in sync with SkySQL.

    +

    1. Offload your Database using mariadb-dump

    +

    The mariadb-dump utility is a powerful command-line tool that allows you to export databases, tables, or specific data from your MariaDB instance in SkySQL.

    +

    Prerequisites

    +

    Ensure you have the mariadb-dump utility installed on your system (see here). +Obtain the necessary connection details for your SkySQL instance, including the host, username, and password.

    +

    Exporting All Databases

    +

    To export all databases from your SkySQL instance, use the following command:

    +
    mariadb-dump -h your_skysql_host -u your_username -p \
    +    --all-databases > all_databases_backup.sql
    +
    +
      +
    • -h your_skysql_host: Specifies the host of your SkySQL instance.
    • +
    • -u your_username: Specifies the username to connect to the SkySQL instance.
    • +
    • -p: Prompts for the password for the specified username.
    • +
    • --all-databases: Exports all databases in the SkySQL instance.
    • +
    • > all_databases_backup.sql: Redirects the output to a file named all_databases_backup.sql.
    • +
    +

    Exporting Selected Databases

    +

    To export specific databases, list the database names after the connection details:

    +
    mariadb-dump -h your_skysql_host -u your_username \
    +    -p database1 database2 > selected_databases_backup.sql
    +
    +
      +
    • database1 database2: Replace with the names of the databases you want to export.
    • +
    • > selected_databases_backup.sql: Redirects the output to a file named selected_databases_backup.sql.
    • +
    +

    Exporting Just the Schema

    +

    To export only the schema (structure) of a database without the data, use the --no-data option:

    +
    mariadb-dump -h your_skysql_host -u your_username -p \
    +    --no-data your_database > schema_backup.sql
    +
    +
      +
    • --no-data: Ensures that only the schema is exported, not the data.
    • +
    • your_database: Replace with the name of the database whose schema you want to export.
    • +
    • > schema_backup.sql: Redirects the output to a file named schema_backup.sql.
    • +
    +

    File Format of Exported Data

    +

    The files exported by mariadb-dump are plain text and contain SQL statements. These files can be used to recreate the databases, tables, and data by executing the SQL statements against a MariaDB instance (a replay sketch follows the list below).

    +
      +
    • Database Creation: The file begins with statements to create the databases.
    • +
    • Table Creation: For each table, the file includes CREATE TABLE statements that define the table structure.
    • +
    • Data Insertion: If data is included, the file contains INSERT INTO statements to populate the tables with data.
    • +
    • Comments: The file may include comments that provide additional information about the export process.
    • +
    +
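
    To replay such a dump into another MariaDB instance, pipe the file back through the mariadb client (a sketch; your_target_host is a placeholder):

    +
    mariadb -h your_target_host -u your_username -p < all_databases_backup.sql
    +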

    Example of Exported File Content

    +

    Here is a snippet of what an exported file might look like: +

    -- MariaDB dump 10.16  Distrib 10.5.9-MariaDB, for debian-linux-gnu (x86_64)
    +--
    +-- Host: your_skysql_host    Database: 
    +-- ------------------------------------------------------
    +-- Server version   10.5.9-MariaDB-1:10.5.9+maria~focal
    +
    +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
    +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
    +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
    +/*!40101 SET NAMES utf8mb4 */;
    +
    +--
    +-- Database: `your_database`
    +--
    +
    +-- --------------------------------------------------------
    +
    +--
    +-- Table structure for table `your_table`
    +--
    +
    +DROP TABLE IF EXISTS `your_table`;
    +/*!40101 SET @saved_cs_client     = @@character_set_client */;
    +/*!40101 SET character_set_client = utf8 */;
    +CREATE TABLE `your_table` (
    +  `id` int(11) NOT NULL AUTO_INCREMENT,
    +  `name` varchar(255) DEFAULT NULL,
    +  `created_at` datetime DEFAULT NULL,
    +  PRIMARY KEY (`id`)
    +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
    +/*!40101 SET character_set_client = @saved_cs_client */;
    +
    +--
    +-- Dumping data for table `your_table`
    +--
    +
    +LOCK TABLES `your_table` WRITE;
    +/*!40000 ALTER TABLE `your_table` DISABLE KEYS */;
    +INSERT INTO `your_table` VALUES (1,'Example Name','2023-01-01 00:00:00'),(2,'Another Name','2023-01-02 00:00:00');
    +/*!40000 ALTER TABLE `your_table` ENABLE KEYS */;
    +UNLOCK TABLES;
    +
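To recreate the databases on another server, you can replay the exported file with the mariadb client; a minimal sketch, assuming a placeholder target host and an account with sufficient privileges:

# Replay the exported SQL statements on the target server to rebuild the
# databases, tables, and data.
mariadb -h your_target_host -u your_username -p < all_databases_backup.sql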

    +

Finally, refer to the mariadb-dump utility reference, where you will find the many command-line options.

    +
    +

    Note

    +

Egress charges may apply when data is exported.

    +
    +

    2. Using MariaDB client

    +

Use MariaDB Client with the connection information to export data from your SkySQL database service. Here is an example that exports all rows from a single table:

    +
    mariadb --host FULLY_QUALIFIED_DOMAIN_NAME --port TCP_PORT \
    +      --user DATABASE_USER --password \
    +      --ssl-verify-server-cert \
    +      --default-character-set=utf8 \
    +      --batch \
    +      --skip-column-names \
    +      --execute='SELECT * FROM accounts.contacts;' \
    +      > contacts.tsv
    +
    +
      +
• Replace FULLY_QUALIFIED_DOMAIN_NAME with the Fully Qualified Domain Name of your service.
• +
• Replace TCP_PORT with the read-write or read-only port of your service.
• +
• Replace DATABASE_USER with the default username for your service, or the username you created.
• +
• Optionally, for large tables, specify the --quick command-line option to disable result caching and reduce memory usage.
• +
• You can customize the SQL, and you can provide multiple SQL statements to --execute (see the sketch after this list).
    • +
    +
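For example, here is a sketch that combines --quick with multiple statements passed to --execute. The accounts.contacts table comes from the example above, and the created_at filter column is only an assumption for illustration:

# --quick streams rows to the output file instead of buffering the whole
# result set in client memory; both result sets land in the same TSV file.
mariadb --host FULLY_QUALIFIED_DOMAIN_NAME --port TCP_PORT \
      --user DATABASE_USER --password \
      --ssl-verify-server-cert \
      --default-character-set=utf8 \
      --batch \
      --skip-column-names \
      --quick \
      --execute='SELECT COUNT(*) FROM accounts.contacts; SELECT * FROM accounts.contacts WHERE created_at >= "2023-01-01";' \
      > contacts.tsv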

    3. Exporting Data Using SkySQL Backup Service API to S3 or GCS Bucket

    +

    The SkySQL Backup service API allows you to perform logical and physical dumps of your SkySQL databases to external storage buckets such as Amazon S3 or Google Cloud Storage (GCS).

    +

    Prerequisites

    +
      +
    • Obtain the necessary credentials for your S3 bucket.
    • +
    • Ensure you have access to the SkySQL Backup service API. You need to generate the API Key from the portal.
    • +
    • Obtain the service ID for your SkySQL instance.
    • +
• Base64 encode your S3 credentials (see the sketch after this list).
    • +
    +
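The credentials must be base64 encoded before they are placed in the request body. As an illustration only (the exact credential format expected by the Backup service is described in its reference documentation), on Linux or macOS you might do something like this:

# Hypothetical S3 credentials file; the exact format SkySQL expects may differ.
cat > s3-credentials.txt <<'EOF'
[default]
aws_access_key_id = YOUR_ACCESS_KEY_ID
aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
EOF

# Encode without line wrapping (GNU coreutils; plain `base64 < file` on macOS
# already prints a single line).
base64 -w0 s3-credentials.txt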

    Performing a Logical Dump to an S3 Bucket

    +

    To perform a logical dump of a SkySQL database to an S3 bucket, you need to make an API call to the SkySQL Backup service. Below is an example of how to do this.

    +

    Example API Call for Logical Dump (The output is all SQL statements) +

    curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
    +--header 'Content-Type: application/json' \
    +--header 'Accept: application/json' \
    +--header 'X-API-Key: ${API_KEY}' \
    +--data '{
    +    "backup_type": "logical",
    +    "schedule": "once",
    +    "service_id": "your_service_id",
    +    "external_storage": {
    +        "bucket": {
    +            "path": "s3://your_s3_bucket_name/path/to/backup",
    +            "credentials": "your_base64_encoded_credentials"
    +        }
    +    }
    +}'
    +

    +
      +
    • backup_type: Set to "logical" for a logical dump.
    • +
    • schedule: Set to "once" to schedule the backup immediately.
    • +
    • service_id: The ID of your SkySQL service.
    • +
    • external_storage.bucket.path: The S3 bucket path where the backup will be stored.
    • +
    • external_storage.bucket.credentials: Base64 encoded S3 credentials.
    • +
    +

    Performing a Physical Dump to an S3 Bucket

    +

When databases are large and you want to move the data around securely, this is likely the best option. To perform a physical dump of a SkySQL database to an S3 bucket, you need to make a similar API call but specify the backup type as "physical".

    +

    Example API Call for Physical Dump +

    curl --location 'https://api.skysql.com/skybackup/v1/backups/schedules' \
    +--header 'Content-Type: application/json' \
    +--header 'Accept: application/json' \
    +--header 'X-API-Key: ${API_KEY}' \
    +--data '{
    +    "backup_type": "physical",
    +    "schedule": "once",
    +    "service_id": "your_service_id",
    +    "external_storage": {
    +        "bucket": {
    +            "path": "s3://your_s3_bucket_name/path/to/backup",
    +            "credentials": "your_base64_encoded_credentials"
    +        }
    +    }
    +}'
    +

    +
      +
    • backup_type: Set to "physical" for a physical dump.
    • +
    • schedule: Set to "once" to schedule the backup immediately.
    • +
    +

    Checking the Status of Initiated Backups

    +

Backups are always scheduled as jobs, and the time taken will depend on the size of your DB. To check the status of the initiated backups, you can use the following API call:

    +
    Example API Call to Check Backup Status
    +curl --location 'https://api.skysql.com/skybackup/v1/backups/status' \
    +--header 'Content-Type: application/json' \
    +--header 'Accept: application/json' \
    +--header 'X-API-Key: ${API_KEY}' \
    +--data '{
    +    "service_id": "your_service_id"
    +}'
    +
    +
      +
• service_id: The ID of your SkySQL service. This API call will return the status of the backups, including whether they are in progress, completed, or failed (a polling sketch follows this list).
    • +
    +
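If you are scripting this check, a minimal polling sketch is shown below. It simply re-issues the status call until you interrupt it; adapt the loop's exit condition once you know the exact status fields returned for your backups:

# Re-query backup status every 30 seconds and pretty-print the raw JSON.
while true; do
  curl --silent --location 'https://api.skysql.com/skybackup/v1/backups/status' \
    --header 'Content-Type: application/json' \
    --header 'Accept: application/json' \
    --header "X-API-Key: ${API_KEY}" \
    --data '{"service_id": "your_service_id"}' | jq '.'
  sleep 30
done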

    4. Replicating changes from SkySQL to a compatible external DB

    +

    See Replicating data From SkySQL to External Database for details.

\ No newline at end of file
diff --git a/FAQs/index.html b/FAQs/index.html new file mode 100644 index 00000000..dc9dec93 --- /dev/null +++ b/FAQs/index.html @@ -0,0 +1,4771 @@ + FAQs - SkySQL Documentation

    FAQs

    +

    About

    +

    What is the history of SkySQL?

    +

SkySQL is a database-as-a-service (DBaaS) that was originally developed and managed by MariaDB Corporation. The cloud division (SkySQL) was later spun out of MariaDB into an independent company - SkySQL Inc. The team that developed SkySQL transitioned over to the new company.

    +

    How do I sign up for SkySQL?

    +
      +
1. Go to https://app.skysql.com
2. Log in or register using social login or email.
3. Once registered, you can get started right away by launching a service, connecting, and loading data.
    +

    Why would I choose a DBaaS over an on-premises deployment?

    +

    Our platform and experts handle the infrastructure needs, allowing you to focus on your core business.

    +

    When you choose SkySQL, a full-featured DBaaS, you forego the capital expenditure of buying hardware, the delay of waiting for new systems to ship every time you need to scale-up or scale-out, and the overhead and opportunity-cost of tuning, monitoring, and upgrading your database. SkySQL also handles routine tasks such as nightly backups and infrastructure maintenance.

    +

    And if you need ultimate control, and have the necessary skills and resources to hand-pick instances and tune configurations, we offer SkySQLĀ Power TierĀ to deliver all of the on-premises benefits without the capex (capital expense) and operational overhead.

    +

    How long do deployments on SkySQL take?

    +

In the Foundation Tier, smaller databases launch in 2-4 minutes. Power Tier deployments with isolated, dedicated Kubernetes environments can take up to 25 minutes; subsequent database deployments reuse the same Kubernetes environment and usually launch in 2-4 minutes.

    +

The real time benefits come every day after, when you're operating at scale. A failed database node can recover in a matter of seconds using Kubernetes self-healing, or fail over instantly to an alternate replicated server, instead of an eight-hour bare-metal rebuild as you might see on-premises or on other cloud platforms.

    +

    What version of MariaDB does SkySQL support?

    +

    SkySQL provides services backed by:

    +
      +
    • MariaDB Enterprise Server 10.5
    • +
    • MariaDB Enterprise Server 10.6
    • +
    • MariaDB Community Server 10.11
    • +
    • MariaDB Community Server 11.4
    • +
    • MariaDB MaxScale
    • +
    +

    Are other databases supported?

    +

    No, SkySQL is dedicated to being the top choice for MariaDB. Our goal is to provide the best price-performance of any DBaaS, offer significant productivity improvements through automation, and serve as the most comprehensive end-to-end platform for all your database needs.

    +

We offer "fractional DBAs" - expert-maintained multi-cloud databases. We're glad to help with your move to SkySQL, whether you're migrating from another database platform or looking to lift-and-shift a MariaDB implementation to SkySQL.

    +

    What hardware does SkySQL run on?

    +

    SkySQL is multi-cloud and as a full-featured DBaaS we handle all of the hardware and infrastructure needs.

    +

    Services are currently available with a range ofĀ instance sizesĀ running on the following cloud service providers:

    +
      +
    • Amazon AWS (Amazon Web Services)
    • +
    • Google GCP (Google Cloud Platform)
    • +
    +

    Transactional services (such as our Replicated Transactions topology) operate on:

    + +

    What software stack does SkySQL run on?

    +

    SkySQL runs on Amazon Elastic Kubernetes Service (EKS), Google Kubernetes Engine (GKE), and MariaDB database products - Enterprise Server, MaxScale. MariaDB Enterprise Server enables a predictable development and operations experience through optimized builds, predictable release behavior, and vendor support.

    +

    How long has SkySQL existed?

    +

    SkySQLĀ officially launchedĀ as a production-ready enterprise-grade DBaaS in 2020, after extensive pre-release testing.

    +

In December 2023, MariaDB completed the spinoff of its SkySQL business to SkySQL Inc., a new independent entity founded by the former MariaDB team that built and supported the SkySQL product.

    +

    Is SkySQL ready for production use?

    +

Yes. SkySQL delivers enterprise-grade cloud database services for mission-critical applications. SkySQL is built to make MariaDB Enterprise ready - optimized for security, stability, and reliability in demanding production environments. Multi-node database deployments feature a comprehensive SLA, High Availability (HA) features, and operations features. Enterprise support options extend support to 24x7, with the additional option of SkyDBA for reactive and proactive assistance from a team of expert DBAs. Security features are designed to meet the GRC and infosec challenges faced by modern applications, and DPA (GDPR) and BAA (HIPAA) are available.

    +

    SkySQL Features

    +

    What services are available on SkySQL?

    +

    SkySQL is primarily designed for online applications and offers two topologies -

    +
      +
• Replicated: Useful for mission-critical, production workloads requiring read scaling. Replicated services feature 1 primary and up to 4 replicas and use MariaDB MaxScale for load balancing and automatic zero-interruption failover.
    • +
    • Single Node: Useful for low-cost development and test transactional workloads. Single Node services cannot be scaled to Replicated topologies.
    • +
    +

    What options are available for scaling and right-sizing SkySQL?

    +

You can choose topologies to match your workload requirements, cloud regions to match your latency and operating requirements, instance sizes, and a support plan.

    +

    Our platform features:

    +
      +
    • Availability in a range of databaseĀ instance sizes and storage sizes
    • +
    • Availability from multipleĀ AWS (Amazon Web Services) and GCP (Google Cloud Platform) regions.
    • +
    • Load Balancing features included withĀ Replicated Transactions topologiesĀ allow for read-scaling through read-write splitting.
    • +
    • Custom instance sizes (forĀ Power TierĀ customers)
    • +
    • Range ofĀ support options
    • +
    +

    What reliability features are available on SkySQL?

    +
      +
    • SkySQL is operated by a global team of Site Reliability Engineers (SRE), expert DBAs, and MariaDB software engineers. Platform problems are escalated to our team 24x7.
    • +
    • Support from MariaDB Corporation, including Enterprise and Platinum tiers optionally with SkyDBA for reactive and proactive assistance
    • +
    • Service Level Agreement, including an elevated SLA forĀ Power Tier customers
    • +
    • Kubernetes self-healing - Databases run in containers in kubernetes clusters and auto-heal.
    • +
    • Load balancing for multi-node configurations using MariaDB MaxScale
    • +
    • High Availability (HA) for multi-node configurations
    • +
    • MaxScale Redundancy option
    • +
    • Inbound and outbound replication - you can replicate to your self managed MariaDB anywhere.
    • +
    +

    What operations features are available on SkySQL?

    +
      +
    • Support from SkySQL Inc, including Enterprise and Platinum tiers optionally with SkyDBA for reactive and proactive assistance
    • +
    • Vendor managed infrastructure and platform
    • +
    • SkySQL Portal and SkySQL DBaaS API for instance management
    • +
    • Compatibility with most programming languages and clients that work with MariaDB or MySQL, for off-the-shelf integration to your stack
    • +
    • Scheduled upgrades to database software
    • +
    • Automated nightly backups
    • +
    • Configuration management
    • +
    • On-demand backups and Snapshots
    • +
    • Monitoring
    • +
    • Ability to deploy additional services to support application migrations and testing on the same configuration used in production
    • +
    • On-demand tear-down of unneeded services
    • +
    +

    What governance, risk, compliance, and information security features are available on SkySQL?

    +
      +
    • Firewall protection, including dedicated IP allowlists to access databases and to access monitoring features
    • +
    • Data-at-rest encryption
    • +
    • Data-in-transit encryption by default
    • +
    • VPC peering, AWS PrivateLink and GCP Private Service Connect options
    • +
    • Standard or enterprise authentication for management portal
    • +
    • Standard, LDAP, or 2FA database authentication
    • +
    • Business Associate Addendum (BAA) for HIPAA
    • +
    • Data Processing Addendum (DPA) for GDPR
    • +
    +

    Pricing

    +

    What does SkySQL cost? How is SkySQL priced?

    +

    Estimated SkySQL pricing is available from theĀ SkySQL portal. SkySQL pricing is very competitive and starts at about $100 per month for production grade databases.

    +

    SkySQL pricing varies based on the selections made when youĀ launch a service. Examples of selections include provider, topology, instance and storage size, and region.

    +

    The pricing shown is not a quote and does not guarantee the cost for your actual use of SkySQL services, as is shown on monthly invoices. The cost estimate can vary from your actual costs forĀ several reasons.

    +

    Do I need to purchase a MariaDB Server license or subscription to use SkySQL?

    +

    No. Purchase of SkySQL service includesĀ supportĀ and access toĀ MariaDB database products on SkySQL.

    +

    I have an existing contract with Google. Can I leverage this for SkySQL?

    +

    Yes. SkySQL is listed in the Google Cloud Marketplace. Customers have the ability to retire their GCP commitment with a SkySQL subscription via the Marketplace.

    +

    See the Marketplace listing.

    +

    Contact usĀ if you have further questions.

    +

    I have an existing contract with AWS. Can I leverage this for SkySQL?

    +

    Yes. SkySQL is an AWS partner network. Customers can retire their AWS commitment with a SkySQL subscription via the AWS Marketplace.

    +

    See the AWS Marketplace listing

    +

    Contact usĀ if you have further questions.

    +

    Do you have a pricing calculator?

    +

    Estimated SkySQL pricing information is shown when youĀ create a serviceĀ based on the selections you make at launch time, such as topology, region, and instance size. PleaseĀ contact usĀ for assistance in cost estimation, includingĀ supportĀ andĀ Power Tier.

    +

    What is included in SkySQL pricing?

    +

SkySQL pricing includes instances for a specific service topology, monitoring, and management features such as backups, upgrades, and patch installs. Some factors, such as object storage and network egress, which are variable and usage-dependent, are not included in estimated pricing. We typically pass through the cloud provider costs with no additional markup.

    +

    What is optional in SkySQL pricing?

    +

    Add-ons are available to optimize your SkySQL experience:

    +
      +
    • SkySQL Power TierĀ is a premium service offering for SkySQL customers who have the most critical requirements for uptime, availability, performance, and support.
    • +
• While all Foundation Tier services include Standard Support, Power Tier customers are offered the Enterprise support plan.
    • +
    • An optional add-on,Ā SkyDBA, further extends the premium support experience and the capabilities of your in-house DBAs with the backing from a global team of expert MariaDB DBAs, available 24/7 for the most severe (P1) issues. SkySQL's SkyDBAs manage your SkySQL databases both proactively and reactively so you can focus on your core business.
    • +
    +

    Is discounted pricing available for a longer-term commitment?

    +

    Yes. Discounts are typically offered for one-year and three-year commitments. PleaseĀ contact usĀ for more information.

    +

    Payment

    +

    What forms of payment does SkySQL accept?

    +

SkySQL accepts payment by major credit card and through remittance accounts.

    +

    Which credit cards does SkySQL accept?

    +

    SkySQL accepts all major credit cards. Specifically, we accept Visa, Mastercard, American Express, Discover, and Diners Club payments from customers worldwide.

    +
    +

    Note

    +

    SkySQL does not store any of your credit card information. We use Stripe to manage all credit card transactions. Stripe is a widely used payment processing platform that enables businesses to accept credit card payments securely

    +
    +

    How do I set up my account to pay by wire transfer/ACH?

    +

    Contact usĀ to have your account set up for payment by wire transfer or ACH.

    +

    Can I pre-fund my account?

    +

    SkySQL contract customers can pre-fund their account.Ā Contact usĀ for more information.

    +

    How do I pay my bill?

    +

    SkySQL charges are paid using a credit card, or via wire transfer/ACH upon invoice in the case of remittance accounts.

    +

    Can I buy SkySQL in the AWS Marketplace?

    +

    Yes. We offer direct purchase through the AWS Marketplace or we can craft a "private offer" to customize a subscription.

    +

    See the AWS Marketplace listing

    +

    Contact usĀ if you have further questions.

    +

    I have an existing contract with AWS. Can I leverage this for SkySQL?

    +

    Yes. SkySQL is an AWS partner network. Customers can retire their AWS commitment with a SkySQL subscription via the AWS Marketplace.

    +

    See the AWS Marketplace listing

    +

    Contact usĀ if you have further questions.

    +

    Can I buy SkySQL in the Google Marketplace?

    +

    Yes. We offer direct purchase through the Google Marketplace or we can craft a "private offer" to customize a subscription.

    +

    See the Marketplace listing.

    +

    Contact usĀ if you have further questions.

    +

    I have an existing contract with Google. Can I leverage this for SkySQL?

    +

    Yes. SkySQL is listed in the Google Cloud Marketplace. Customers have the ability to retire their GCP commitment with a SkySQL subscription via the Marketplace.

    +

    See the Marketplace listing.

    +

    Contact usĀ if you have further questions.

    +

    Billing and Invoices

    +

    Am I charged for deleted or stopped databases? What database states are billable?

    +

    If youĀ stop a SkySQL service, you will continue to be charged for storage, since your data is not deleted. Instance and egress charges will stop until the instance is started again.

    +

    How can I see my current charges?

    +

Current month's estimated charges can be viewed on the SkySQL portal dashboard. Detailed information is also available under 'Billing', where you can see the breakdown of all your current charges - you can see resource usage by Service Name (your individual DB clusters) or by resource type. Variable charges such as object storage and network egress are updated the day prior to the last day of the month and are available in the invoice. You can also use the SkySQL REST API to fetch usage and billing data.

    +

    How can I see detailed billing reports?

    +

    SkySQL invoices are sent monthly and include a detailed breakdown of usage, pricing, and taxes. For Team accounts, only the Team owner has access to Account Information.

    +

    When will I be billed?

    +

    Invoices for SkySQL are sent by email on subscription renewal. Subscription renewal occurs on the last day of the month. Accounts using a credit card are charged at this time.

    +

    Will I be charged VAT or taxes?

    +

    MariaDB will bill for VAT and/or taxes in applicable jurisdictions. Customers are responsible for paying all applicable taxes and fees. See theĀ SkySQL Terms of UseĀ for additional information.

    +

    How can I see discounts and service credits?

    +

    Current month'sĀ estimated charges, including coupons and service credits, can be viewed on theĀ Account InformationĀ page and are updated six times per day.

    +

    In the event of service credits issued based onĀ SLA, service credits will be included in coupons and service credits on theĀ Account InformationĀ page.

    +

    Can I stop or pause my instance to save money?

    +

    Instances can beĀ stoppedĀ to save money. While stopped, additional instance and egress charges will not accrue, but you will continue to be charged for storage.

    +

    Who do I contact with billing questions?

    +

    ContactĀ info@skysql.comĀ with billing questions.

    +

    Backup and Restore

    +

    How do I backup my data on SkySQL?

    +

SkySQL runs full backups automatically each night. SkySQL also allows on-demand backups (a Preview feature).

    +

    Can I set the frequency or schedule of automated backups?

    +

    No. Backup frequency and schedule are not customer configurable. SkySQL Power Tier customers should contact us if alternate backup frequency or schedule is required.

    +

    Are automated backups sent offsite? Will my data be sent to another country?

    +

    No. Data is not sent to another country. All data is managed within the same region where your database is running for data sovereignty.

    +

    Does SkySQL guarantee an RTO and RPO?

    +

    No.

    +

    Do backup operations impact application performance?

    +

    No.

    +

    MariaDB Enterprise Backup (mariabackup) is used for Replicated Transactions and Single Node Transactions service backups. MariaDB Enterprise Backup breaks up backups into non-blocking stages so writes and schema changes can occur during backups.

    +

    Are incremental backups available?

    +

    The backup service provides support for incremental backups. It is in preview state (Dec 2023).

    +

    How long are backups retained?

    +

    Backups for running and stopped services are retained for 30 days. If a service is deleted, no further backups for that service are produced and backups on hand are purged after 7 days.

    +

    Can I set the retention window for automated backups?

    +

    No. Backup retention is not customer configurable. SkySQL Power Tier customers should contact us if an alternate retention schedule is required.

    +

    How do I restore my data from a SkySQL backup?

    +

Request data restore by creating a support case in the Customer Support Portal. Please state what you need restored, and the desired restore point. Self-service Restore functionality is available using the Backup service and API (in Preview as of Dec 2023).

    +

    Can I request a partial restore of data from backup?

    +

    Yes, byĀ support case.

    +

    Does SkySQL support Point-in-Time Recovery (PITR)?

    +

    By default, full and complete backup restoration is available. To enable point-in-time recovery, services must be configured in advance for additional binary log retention.Ā Point-in-time recovery (PITR)Ā configuration is available toĀ Power TierĀ customers.

    +

    Can I request restore of my data to a different region?

    +

    Yes, byĀ support case

    +

    Can I request restore of my data to a different topology?

    +

    Yes, byĀ support case

    +

    Can I retrieve my database backup from SkySQL? Is there vendor lock?

    +

    Yes, you can retrieve your database. No, there is no vendor lock. Your data is your data. Create aĀ support caseĀ for access to a backup.

    +

    Encryption

    +

    Does SkySQL support data-at-rest encryption (on-disk encryption)?

    +

    Yes. All SkySQL data is encrypted on disk.

    + +

    Does SkySQL support data-in-transit encryption (over the network encryption)?

    +

    By default, SkySQL services feature data-in-transit encryption for client connections.

    +

    By default, server-to-server communications between the nodes of a SkySQL service are protected with data-in-transit encryption.

    +

    For additional information, see "Data-in-Transit Encryption".

    +

    Does SkySQL support encrypted client connections?

    +

    Yes. By default, SkySQL requires client connections via TLS (TLS 1.2, TLS 1.3).

    +

    Does SkySQL support unencrypted client connections?

    +

    SkySQL supportsĀ disabling SSL/TLS via the Portal or using the API.

    +

    What encryption algorithms are used for on-disk encryption?

    +

    SkySQL on Amazon AWS benefits fromĀ Amazon EBS encryption, which is AES256.

    +

    SkySQL on Google GCP leverages Google'sĀ default encryption, which is AES256 or AES128.

    +

    What versions of SSL or TLS are supported?

    +

    TLS 1.2, and TLS 1.3 are supported.

    +

    When do TLS certificates expire?

    +

    TLS certificates expire every two years.

    +

    How are TLS certificates and encryption keys managed?

    +

    MariaDB Corporation leverages HashiCorp Vault for certificate and key management. Certificates and keys are not customer-configurable.

    +

    Are client certificates supported?

    +

    No. SkySQL supports server-side certificates. Database users are authenticated by standard password authentication, LDAP, and/or Two-Factor Authentication (2FA).

    +

    Is ed25519 authentication supported?

    +

    No. While MariaDB Enterprise Server includes ed25519 support, SkySQL leverages a version of MariaDB MaxScale which is not ed25519-compatible.

    +

    Why do I need to download a certificate authority chain?

    +

    A certificate authority chain is provided to allow your client to establish a secure and encrypted connection to a SkySQL database service, confirming the authenticity of the server certificate.

    +

    How frequently are cryptography libraries (like OpenSSL) updated?

    +

    Cryptography libraries are included in our standard release process, and vulnerability scanning is conducted for each release.

    +

    Data Deletion

    +

    Can I delete my running or stopped SkySQL service?

    +

    Yes. The decision to delete your running service rests with you and your business. Please consider production impacts before deleting a service. SkySQL permits the on-demand deletion of running and stopped services.

    +

    Can I delete my pending SkySQL service?

    +

    No. The launch process must complete before deletion is permitted.

    +

    How long do you keep my data when I delete a service?

    +

    All data residing on a service's storage is deleted at time of service deletion. Backups for deleted services are purged after 7 days.

    +

    Can I get my data back if I delete a service by accident?

    +

Maybe. If you contact us before the system completes data deletion, we can recover your data. Backups for deleted services are purged after 7 days.

    +

    Is it possible for SkySQL to retain my data when I delete a service?

    +

    No. You should download your data so you have a local copy before you delete the service.

    +

    Will my data be retained if a hard drive gets swapped-out?

    +

    No. SkySQL is hosted on public cloud provider systems.

    + +

    Can data be purged from backups?

    +

    Yes. If you would like backups purged, pleaseĀ create a support case

    +

    Monitoring

    +

    How do I access monitoring?

    +

You can access SkySQL Monitoring after launching a service by clicking the "Monitoring" link in the SkySQL main menu (left navigation).

    +

    What is monitored?

    +

    SkySQL Monitoring covers status and metrics specific to a service and its servers. AĀ complete list of chartsĀ is provided.

    +

    Who is alerted if a service goes down?

    +

SkySQL Inc's Support and SRE teams are alerted if a SkySQL service becomes unavailable or when serious issues are detected (e.g. disk is 90% utilized).

    +

    Additionally, SkySQL automatically turns on several sensible alerting rules so the customer can also be alerted.

    +

    How can I get Alerted on DB events?

    +

SkySQL Monitoring includes alerting features, which allow configurable alerting rules, notification channels, and notification criteria. These settings are managed from the SkySQL Monitoring interface. You can access SkySQL Monitoring after launching a service by clicking the "Monitoring" link in the SkySQL main menu (left navigation).

    +

    Support

    +

    How do I contact support?

    +

    SkySQL customers can contact us via theĀ Customer Support Portal.

    +

    If you are not yet a SkySQL customer, pleaseĀ contact usĀ with questions.

    +

    What support options are available for SkySQL?

    +

    Included with Foundation Tier services:

    +
      +
    • Standard Support, 24x5
    • +
    +

    Available to Power Tier customers:

    +
      +
    • Enterprise Support, 24x7
    • +
    +

    SeeĀ full details of our support options.

    +

    Is 24x7x365 support available for mission-critical applications?

    +

    Yes.Ā Enterprise Support levels are available for customers requiring 24x7x365 support (24 hours per day, 7 days per week, 365 (or 366) days per year).

    +

    Is SkySQL fully managed?

    +

    Yes. SkySQL infrastructure is fully managed, including many typical operations features such as automated nightly backups and monitoring.

    +

Standard support is included with Foundation Tier services. Activities like performance tuning and assistance with schema changes are not included in standard support. Power Tier customers choose between Enterprise and Platinum support options, which include consultative support.

    +

    Our optionalĀ SkyDBAĀ service is available for Enterprise and Platinum support customers, and SkyDBA customers receive both reactive (break/fix) and proactive (analyze/enhance) assistance.

    +

    What professional services are available for SkySQL?

    +

    SkySQL offers a full range of professional services, including:

    + +

    Inquiries

    +

    Who do I contact if I have questions about SkySQL?

    +

    Contact SkySQL IncĀ or email us at info@skysql.com

    +

    How do I contact sales to buy services on SkySQL?

    +

    SkySQL is available for immediate use.Ā Get started now. If you would like assistance from sales,Ā contact us.

    +

    Who do I contact with billing questions?

    +

Billing questions can be directed to the Billing Team.

    +

    How do I provide feedback about SkySQL Documentation?

    +

    To aid our continuous improvement efforts, we encourage you to provide feedback on our documentation and your experiences using it via the following:

    + +

    I am a SkySQL customer. How do I get support?

    +

    SkySQL customers can contact us via theĀ Support Portal. Customers can also use the Support email.

\ No newline at end of file
diff --git a/FractionalDBA/index.html b/FractionalDBA/index.html new file mode 100644 index 00000000..695878dc --- /dev/null +++ b/FractionalDBA/index.html @@ -0,0 +1,2738 @@ + Fractional DBA Service - SkyDBA - SkySQL Documentation

    Fractional DBA Service - SkyDBA

    +

    SkyDBA is a "Fractional" DBA Service, a cost-effective solution for businesses that need database administration but do not require a full-time database administrator. This service provides access to a team of experienced database administrators on a part-time basis. Whether it's for routine maintenance, troubleshooting, or strategic advice, SkyDBA's Fractional DBA Service ensures that expert assistance is just a message away. This approach not only saves the expense of a full-time employee but also provides a higher level of service due to the collective knowledge and experience of the SkyDBA team.

    +
    +

    Note

    +

    SkyDBA is an optional service that you can purchase. You can use this service regardless of the Tier (Foundation or Power) used to deploy DB services. For more information, please contact SkySQL support

    +
    +

    Here is what you can expect from this ā€œadd-onā€ service.

    +

    Migration Methodology & Advice

    +

    Expert advice available on migration methodology and procedures.

    +

    Query Optimization and Performance Tuning

    +

SkyDBAs offer tailored query analysis and professional tuning upon request. Our team provides expert assistance in implementing low-impact table alterations when necessary.

    +

    Quarterly Business Review

    +

    With a SkyDBA subscription, your customer success manager can schedule quarterly business reviews with someone from the SkyDBA team to review items such as:

    +
      +
    • Historical usage focusing on peak
    • +
    • Future Growth/Capacity Planning
    • +
    • Recovery Time (RTO)/Recovery Point (RPO) Objectives
    • +
    • Escalation Points
    • +
    • Business Continuity
    • +
    +

    Quarterly Security Audits

    +

    Work with the SkyDBA team to ensure that your environment is safe and secure. This includes auditing of users and grants.

    +

    Proactive Monitoring and Incident Response

    +

SkyDBAs monitor instances for potential business impact events. Upon detection, events are internally flagged. We investigate and collaborate with your team as needed for swift resolution.

    +

    Extended Troubleshooting/Analysis (Core Dumps, system logs, etc.)

    +

    With a SkyDBA Subscription, our database experts can assist with tasks such as analyzing core dumps, system logs and other similar technical issues that require expert database server knowledge.

    +

    Tailored Backup/Restore Strategies

    +

Partner with SkyDBAs to create tailored backup strategies to meet your organization's Recovery Time Objective (RTO) and Recovery Point Objective (RPO) goals.

    +

    Data Recovery Assistance and Validation

    +

SkyDBAs can assist in data recovery from backups or other sources, providing expertise for analysis. Additionally, SkyDBA can conduct annual Disaster Recovery exercises upon request to ensure preparedness. It's important to note that running a recovery to a secondary service may require additional compute resources.

\ No newline at end of file
diff --git a/High Availability, DR/Failover_to_another_region.drawio.png b/High Availability, DR/Failover_to_another_region.drawio.png new file mode 100644 index 00000000..3e285045 Binary files /dev/null and b/High Availability, DR/Failover_to_another_region.drawio.png differ
diff --git a/High Availability, DR/HA_in_single_region.drawio.png b/High Availability, DR/HA_in_single_region.drawio.png new file mode 100644 index 00000000..a98c3a3e Binary files /dev/null and b/High Availability, DR/HA_in_single_region.drawio.png differ
diff --git a/High Availability, DR/Setup Global Replication/index.html b/High Availability, DR/Setup Global Replication/index.html new file mode 100644 index 00000000..ed3c225c --- /dev/null +++ b/High Availability, DR/Setup Global Replication/index.html @@ -0,0 +1,2761 @@ + Setup Global Replication - SkySQL Documentation

    Setup Global Replication

    +

    SkySQL offers a robust platform for managing databases in the cloud and supports Global Replication for various use cases including disaster recovery, cross-region failover and global distribution of data. In this guide, weā€™ll explore how to automate the creation, restoration and replication of SkySQL database services for global availability using the SkySQL API. We will use the following SkySQL resources for the setup:

    +
      +
    • Provisioning APIs: To launch primary and secondary SkySQL services.
    • +
    • Backup APIs: To backup the primary service and restore the data to the secondary service.
    • +
• Replication Procedures: To set up active replication between the primary and the secondary services.
    • +
    +

    Step 1: Generate SkySQL API Key

    +

1. Go to the User Profile page of the SkySQL Portal to generate an API key.
2. Export the value from the token field to an environment variable $API_KEY:

    +
    $ export API_KEY='... key data ...'
    +
    +

    TheĀ API_KEYĀ environment variable will be used in the subsequent steps.

    +

    Step 2: Launch SkySQL Services

    +

    Launch two SkySQL services - a Primary that your application(s) will connect to and a Secondary that will act as a globally available service. If you already have your Primary service running, you simply need to create a new Secondary service.

    +
    +

    Note

    +

    You can launch these services using the Portal or Using the REST API as shown below. Launching a new service will take about 5 minutes.

    +
    +

1. The following API requests will create two services in Google Cloud: 'skysql-primary' in the Virginia region (us-east4) and 'skysql-secondary' in the Oregon region (us-west1).

    +
    curl --location --request POST https://api.skysql.com/provisioning/v1/services \
    +   --header "X-API-Key: ${API_KEY}" --header "Content-type: application/json" \
    +   --data '{
    +"service_type": "transactional",
    +"topology": "standalone",
    +"provider": "gcp",
    +"region": "us-east4",
    +"architecture": "amd64",
    +"size": "sky-2x8",
    +"storage": 100,
    +"nodes": 1,
    +"name": "skysql-primary",
    +"ssl_enabled": true
    +}'
    +
    +
    curl --location --request POST https://api.skysql.com/provisioning/v1/services \
    +   --header "X-API-Key: ${API_KEY}" --header "Content-type: application/json" \
    +   --data '{
    +"service_type": "transactional",
    +"topology": "standalone",
    +"provider": "gcp",
    +"region": "us-west1",
    +"architecture": "amd64",
    +"size": "sky-2x8",
    +"storage": 100,
    +"nodes": 1,
    +"name": "skysql-secondary",
    +"ssl_enabled": true
    +}'
    +
    +

    2. Each SkySQL service has a unique identifier. Please make note of the identifier shown in the API response. We will need it later.

    +

    Step 3: Backup the Primary and Restore to the Secondary Service

    +

    In a real world scenario, the Primary service will contain data which will need to be restored to the Standby service before the replication can be set up. SkySQL performs full backup of your services every night. You can either use an existing nightly backup or create a schedule to perform a new full backup.

    +
    +

    Note

    +

Depending on the size of your databases, backing up a service can take substantial time. Creating a new backup is not necessary if you already have an existing full backup of your service; if you have a recent backup (usually available), you can skip this step. After restoring from the backup, all subsequent DB changes are replayed from the source DB's binlog. Binlogs expire after 4 days by default, so you cannot use a backup older than 4 days.

    +
    +

    1. Use the following API to list backups associated with the Primary service. Replace {id} with the database id of the Primary service. Look for a "FULL" backup or "snapshot".

    +
    curl --location --request GET https://api.skysql.com/skybackup/v1/backups?service_id={id} \
    +   --header "X-API-Key: ${API_KEY}" --header "Content-type: application/json"
    +
    +

    You can also look for recent "FULL" backups from the Portal. If not available you can also initiate a backup from the Portal or using the API below.

    +

    1. Use the following API to create a one-time schedule to perform a new full backup. Replace {id} with the id of the Primary service.

    +
    curl --location --request POST https://api.skysql.com/skybackup/v1/backups/schedules \
    +   --header "X-API-Key: ${API_KEY}" --header "Content-type: application/json" \
    +   --data '{
    +"backup_type": "full",
    +"schedule": "once",
    +"service_id": "{id}"
    +}'
    +
    +

    2. Each backup also has a unique identifier. Make note of the identifier shown in the API response. Now use the following API to restore the backup to the Secondary service.

    +
    +

    Note

    +

    Please note that restoring the backup on a SkySQL service will stop the service if it is running and will wipe out all existing data.

    +
    +

    Replace {backup-id} with the backup id that you want to restore and {service-id} with the id of the Secondary service.

    +
    curl --location --request POST https://api.skysql.com/skybackup/v1/restores \
    +   --header "X-API-Key: ${API_KEY}" --header "Content-type: application/json" \
    +   --data '{
+"id": "{backup-id}",
    +"service_id": "{service-id}"
    +}'
    +
    +
    +

    Note

    +

    As of July 2024, you can only restore from Backups within the same Cloud provider. To restore to a different provider, you would need to explicitly Backup to your own S3/GCS bucket, copy the folder over to the other provider's bucket and initiate a Restore. Please refer to the Backup Service docs.

    +
    +
    +

    Note

    +

    Once the restore is complete, the default username and password displayed in "connect" window of the Secondary service will not work. Restore overwrites this information with the username and password of the Primary service. Hence, you will have to use Primary service's username and password to connect to the Secondary service.

    +
    +

    Step 4: Set up Replication between the Primary and the Secondary

    +

1. Since we want to set up replication between the two SkySQL services, the Secondary service must be able to connect to the Primary service. Add the Outbound IP address of the Secondary service to the allowlist of the Primary service; the Outbound IP can be obtained from the "Service Details" page in the SkySQL portal.

    +

    2. Next, obtain the GTID position from which to start the replication by using the following API. Please replace {service_id} with the service id of the primary service. +

    curl --location --request GET "https://api.skysql.com/skybackup/v1/backups?service_id={service_id}" \
    +  --header "X-API-Key: ${API_KEY}" --header "Content-type: application/json" | jq
    +
    +Make note of the gtid position ("binlog_gtid_position") in the API response output.
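If you are scripting this step, you can pull the GTID position out of the response with jq. The exact JSON structure is not shown here, so this sketch uses a recursive filter that prints any binlog_gtid_position fields it finds:

curl --silent --location --request GET "https://api.skysql.com/skybackup/v1/backups?service_id={service_id}" \
  --header "X-API-Key: ${API_KEY}" --header "Content-type: application/json" \
  | jq -r '.. | .binlog_gtid_position? // empty'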

    +

3. Now configure the Secondary service by calling the following stored procedure. Replace 'host' and 'port' with the Primary service's hostname and port, and replace 'gtid' with the GTID position obtained from the previous step. Pass true or false for use_ssl_encryption depending on whether to use SSL.

    +

    CALL sky.change_external_primary_gtid(host, port, gtid, use_ssl_encryption);
    +
+Alternatively, you can call sky.change_external_primary with the "binlog_file" and "binlog_position" values from step 2 above:

    +
    CALL sky.change_external_primary
    +  ('dbpwfxxxx.sysp0000.db1.skysql.com',
    +   3306,
    +   'mariadb-bin.000007',
    +   xxxxx,
    +   true);
    +
    +

If successful, you should see output similar to the following.

    +
    +-----------------------------------------------------------------------------------------------------------------------------------------+
    +| Run_this_grant_on_your_external_primary                                                                                                 |
    ++-----------------------------------------------------------------------------------------------------------------------------------------+
    +| GRANT REPLICATION SLAVE ON *.* TO 'skysql_replication_dbpwxxxxx'@'174.x.x.x' IDENTIFIED BY 'xxxxxxxxxx'; |
    ++-----------------------------------------------------------------------------------------------------------------------------------------+
    +
    +

Please copy the "GRANT REPLICATION..." command from the output and run it on the Primary service.

    +

    4. Start replication and check status on the Secondary service using the following procedures:

    +
    CALL sky.start_replication();
    +CALL sky.replication_status();
    +
    +

5. Once replication is set up, verify the status of the new database service in the SkySQL console. Ensure that the service is replicating as expected for your global replication use case.

\ No newline at end of file
diff --git a/High Availability, DR/index.html b/High Availability, DR/index.html new file mode 100644 index 00000000..d94ca183 --- /dev/null +++ b/High Availability, DR/index.html @@ -0,0 +1,2652 @@ + Higher Availability and Disaster Recovery Concepts - SkySQL Documentation

    Higher Availability and Disaster Recovery Concepts

    +
    +

    Note

    +

SkySQL provides HA using semi-synchronous replicas. Unlike hyperscalers, these replicas are not standby DB servers but are actively used for reads. When the primary crashes, our intelligent proxy allows us to fail over near instantly to an alternate replica, or fail back when the original primary recovers, ensuring data consistency even when replicas have a replication lag through "causal reads" or transaction replay.

    +
    +
    +

Use 'Replicated Topology' for HA

    +

For HA and load balancing of client requests, no configuration is required. Just launch a replicated topology DB service. SkySQL automatically starts an intelligent proxy that does all the heavy lifting: detecting failures and replaying transactions, staying aware of which node is the primary at all times, balancing load, and much more.

    +

    You should be aware of the causal_reads configuration as outlined below. The sections below provide a more detailed description of how SkySQL delivers on HA and scaling across replicas.

    +

    Level 1 Resiliency - container health checks, compute-storage isolation

    +

To provide high resiliency we try to protect every layer of the stack: disks, compute, zones/cloud regions, network, and even the load balancer accepting incoming DB connections. The graphic below depicts this architecture. Let's peel the onion a bit.

    +

    All Cloud databases configured for HA replicate the data across multiple availability zones (AZ). Ensuring your data is protected against data center failures. This is necessary, but not sufficient. In SkySQL, data is always isolated from compute on the underlying block storage device of each AZ. This device keeps a copy of each block on multiple servers providing the first layer of protection against component failures or corruption.

    +

The deployment of DB servers occurs within containers orchestrated by Kubernetes (k8s). In the event of cloud instance failures, SkySQL's health monitoring prompts k8s to revive the container in an alternate instance, seamlessly reconnecting to the same storage volume. AWS RDS, for example, runs MariaDB on VMs requiring a replicated setup for any protection against node failures.

    +

    Level 2 Resiliency - Failover using Intelligent proxy

    +

While hardware failures are a possibility, a more common scenario we see in practice involves a DB crash due to resource exhaustion or timeouts, such as running out of allocated temp space due to rogue queries or an unplanned large spike in data load. In such instances, it is crucial for application connections to smoothly transition to an alternate server.

    +

Behind the scenes, SkySQL consistently directs SQL through its intelligent proxy. This proxy not only continuously monitors servers for failures but also remains acutely aware of any replication lags in the replica servers. Should a primary server fail, an immediate election process ensues to select a replica with the least lag. Simultaneously, attempts are made to flush any pending events, ensuring synchronization and full data consistency. Any pending transactions on the primary server are also replayed. Collectively, these measures enable applications to operate without connection-level interruptions or SQL exceptions. Achieving heightened levels of High Availability (HA) is effortlessly attainable by expanding the number of replicas. Replication can even extend across different cloud providers or to a self-managed ("peace of mind") replica within a customer's own environment.

    +

    HA in a Single Region

    +

    HA in a single region

    +

    Scaling Concurrent Users without Compromising Consistency

    +

Cloud offerings of open source relational databases often achieve scalability by distributing data across a cluster of nodes, often relying on a replication model where 'writes' to the primary node are asynchronously transmitted to one or more replicas. Typically, the onus is on the customer to manage the distribution of traffic across the cluster, either through client application logic or by configuring a proxy service. Several customers have told us that this is simply too big a challenge, effectively capping the scalability of these cloud solutions. Even when customers successfully navigate this challenge, with this approach data consistency might not be uniform across the entire cluster at any given moment.

    +

When application client connections are evenly load balanced across these replicas for 'reads', the application must either tolerate potentially stale reads or consistently direct all requests to the primary, severely limiting scalability. Replicas are relegated to offline tasks like reporting, a common scenario from our observations in AWS RDS.

    +

Contrastingly, in SkySQL, the intelligent proxy provides consistency options without compromising its ability to load balance requests across replicas, supporting both 'causal' and 'strong, global' consistency models. Let's delve deeper.

    +

Causal consistency ensures that 'reads' are fresh only concerning the writes they are causally dependent on. For instance, when an app client executes a 'write' followed by a 'read', it expects to see the changed value, causally dependent on the preceding 'write'. This sequence may need to be satisfied exclusively by the primary if the replicas lag behind. Concurrent clients, however, continue to be load balanced across all servers.

    +

This model functions optimally when application clients utilize sticky SQL connections. However, in the modern landscape where applications are often distributed (microservices) and rely on connection pooling frameworks, a 'write' and the subsequent 'read' might occur on different connections. To ensure consistent reads, awareness of the 'lag' at a global level is imperative. Fortunately, this is seamlessly achieved with a simple switch in SkySQL. If the 'write' rate is moderate and the replicas can keep up (a prevalent scenario in practice), clients continue to uniformly utilize the entire cluster.

    +

    Configuring Causal Read in SkySQL

    +

    Causal consistency is configured in the SkySQL Configuration Manager, maxscale settings (applies to Replicated clusters only)

    +
    +

    Note

    +

    You can configure causal reads using the SkySQL configuration Manager. Look for maxscale properties and search for causal_reads.

    +
      +
    • set causal_reads to 'local' to achieve consistency at a connection/session level.
    • +
    • set causal_reads to 'global' for strict consistency across all connections.
    • +
• set causal_reads to 'fast' to achieve consistency at a connection/session level; it is faster than 'local' but comes at the cost of load balancing.
    • +
    +
    +

    You can also configure causal_reads_timeout so any reads on replicas don't wait too long for a consistent read.

    +

Finally, you can configure the max_slave_replication_lag, which determines the maximum lag for any read. The load balancer will only route to replicas with a lag less than this value.

    +

    Increased throughput using Active-Active

    +

    Unlike RDS or GCP, where the standby is not used for client requests (wasting resources), SkySQL maximizes the available compute power across all nodes, delivering unparalleled cost effectiveness.

    +

A notable feature enhancing performance is 'Read-Write Splitting', allowing for custom routing to achieve consistently lower latencies for specific application patterns. For example, point queries and index-optimized queries can be directed to select nodes hosting frequently accessed data, while more resource-intensive scan-aggregation class queries (such as those for reporting dashboards or complex queries based on end-user selections of historical data) can be routed to a separate set of nodes. These routing strategies effectively segment actively used data sets, optimizing the DB buffer cache and resulting in lower latencies.

    +

    The implementation of these routing strategies is straightforward, primarily through the use of "Hint Filters." Standard SQL comments are utilized to customize routing to the appropriate server. Additional details on Hint Filters and Read-Write Splitting can be found in the MariaDB documentation.

    +

    In SkySQL, you can control routing using two strategies:

    +
      +
    • Using the read port for the service: typically this is port 3307. Requests on this port (read_only) are load balanced only across the available replicas.
    • +
    • Using the Hintfilter (TODO: provide detailed example using SkySQL node names); a hedged sketch with placeholder node names follows this list.
    • +
    +
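    The sketch below illustrates both strategies, reusing the SKYSQL_* environment variables defined in the REST API Quickstart later in this document; the table app.t1 and the node name server1 are placeholders, and the hint syntax assumes the MaxScale hintfilter is active for the service.

# Strategy 1: connect on the read-only port (typically 3307) so the
# statement is load balanced across the available replicas only.
mariadb --host "${SKYSQL_FQDN}" --port 3307 \
   --user "${SKYSQL_USERNAME}" --password="${SKYSQL_PASSWORD}" \
   --ssl-verify-server-cert \
   -e "SELECT COUNT(*) FROM app.t1;"

# Strategy 2: embed a MaxScale routing hint in a SQL comment to pin a
# statement to a specific node ('server1' is purely illustrative).
mariadb --host "${SKYSQL_FQDN}" --port "${SKYSQL_PORT}" \
   --user "${SKYSQL_USERNAME}" --password="${SKYSQL_PASSWORD}" \
   --ssl-verify-server-cert \
   -e "SELECT * FROM app.t1 WHERE id = 42; -- maxscale route to server server1"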

    Level 3 Resiliency - Disaster Recovery - Across Regions, Cloud Providers, or "Self-managed" Environments

    +
    +

    Note

    +

    Please refer to this document for the steps to set up a distant replica for DR.

    +
    +

    The major cloud providers tout disaster recovery across regions, ensuring resilience against natural disasters impacting an entire geographical region. But in reality, such disasters are exceedingly rare. What's far more common are technical issues impacting an entire region for a specific cloud provider. For instance, we've encountered DNS-level failures in GCP regions, rendering all services dependent on DNS, including SkySQL, inaccessible.

    +

    One effective strategy to mitigate such risks is to replicate data to a data center owned by a different cloud provider within the same geographical area, minimizing network latencies. Disaster recovery across cloud providers is, of course, something an individual provider such as AWS or GCP simply doesn't support. Alternatively, customers can maintain their own "standby" database for emergencies: an environment entirely under their control, ensuring a near-real-time copy of the data at all times.

    +

    Failover to another Region

    +

    Failover when an entire region becomes unavailable

    +

    SkySQL empowers users to configure "external" replicas that can run anywhere, offering flexibility and resilience.

    +

    To facilitate this, SkySQL provides several built-in stored procedures for configuring both "outbound" and "inbound" replication to any compatible MariaDB or MySQL server environment. This flexibility allows users to tailor their disaster recovery strategy based on their specific needs, whether replicating across regions, cloud providers, or maintaining self-managed standby environments.
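    As a rough sketch of the inbound side from the external server's point of view (the SkySQL-side stored procedures and replication credentials are covered in the document linked above), a self-managed MariaDB replica is pointed at the SkySQL service with standard replication commands; every value below is a placeholder:

# Run on the self-managed / external replica. Standard MariaDB syntax;
# host, credentials, and SSL settings are placeholders to be replaced
# with the values provided for your SkySQL service.
mariadb <<'SQL'
CHANGE MASTER TO
  MASTER_HOST='<skysql-service-fqdn>',
  MASTER_PORT=3306,
  MASTER_USER='<replication_user>',
  MASTER_PASSWORD='<replication_password>',
  MASTER_USE_GTID=slave_pos,
  MASTER_SSL=1;
START SLAVE;
SHOW SLAVE STATUS\G
SQL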

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Portal features/Billing/index.html b/Portal features/Billing/index.html new file mode 100644 index 00000000..551bc9ec --- /dev/null +++ b/Portal features/Billing/index.html @@ -0,0 +1,2624 @@ + + + + + + + + + + + + + + + + + + + + + + + Billing - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Billing

    +

    Billing is associated with aĀ SkySQL ID.

    +

    For pricing information, see "Pricing".

    +

    Usage Information

    +

    From theĀ Portal, you can access a current billing and usage summary:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Click your name in the upper-right corner of the interface, then select "Billing" from the menu.
    4. +
    5. The "Current Usage" tab (the default) shows current billing and usage summary.
    6. +
    +

    Current charges, prior billing date, and next invoice date are shown.

    +

    Usage information can be shown by service or by resource.

    +

    Click the resource name or service name to expand the view.

    +

    current-usage.png

    +

    Billing - Current Usage

    +

    Billing History & Invoices

    +

    From theĀ Portal, you can access prior invoices:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Click your name in the upper-right corner of the interface, then select "Billing" from the menu.
    4. +
    5. Click the "Billing History" tab to show available invoices.
    6. +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Portal features/Launch page/index.html b/Portal features/Launch page/index.html new file mode 100644 index 00000000..8925ed6a --- /dev/null +++ b/Portal features/Launch page/index.html @@ -0,0 +1,2562 @@ + + + + + + + + + + + + + + + + + + + + + + + Launch page - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Launch page

    +

    The Launch page can be accessed at Launch.

    +

    launch

    +

    Launch Service

    +

    While making launch-time selections, your selections and estimated costs are shown on the right panel.

    +

    To launch a SkySQL service from theĀ Portal:

    +
      +
    1. From the Dashboard, click the + Launch New Service button.
    2. +
    3. Choose Service Type: Transactions
    4. +
    5. Choose the desiredĀ Topology: Enterprise Server Single Node or Enterprise Server With Replica(s)
    6. +
    7. Choose the desired Cloud Provider: AWS, Google Cloud, or Azure
    8. +
    9. Choose the desiredĀ Region.
        +
      • Each region has a scheduled maintenance window.
      • +
      +
    10. +
    11. Choose the desired Hardware Architecture: AMD64 (default), or ARM64 for AWS Graviton.
    12. +
    13. Choose the desiredĀ Instance Size.
        +
      • If your workload requires a larger instance size, contact us regardingĀ Power Tier.
      • +
      +
    14. +
    15. If needed, enableĀ Auto-Scaling of Nodes.
    16. +
    17. Choose the desiredĀ Storage Configuration.
    18. +
    19. If needed, enableĀ Auto-Scaling of Storage.
    20. +
    21. Choose number of nodes to deploy.
    22. +
    23. Choose the desiredĀ Software Version.
    24. +
    25. Enter the desired Service Name (4-24 characters).
    26. +
    27. Enable topology-specific features, if desired:
        +
      • Disable SSL/TLS
      • +
      • NoSQL Interface
      • +
      +
    28. +
    +

    After initiating service launch, the service will be shown on theĀ PortalĀ Dashboard.

    +

    AĀ notificationĀ will be sent at time of service launch initiation and when service launch completes.

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Portal features/Manage your Service/index.html b/Portal features/Manage your Service/index.html new file mode 100644 index 00000000..cd873a07 --- /dev/null +++ b/Portal features/Manage your Service/index.html @@ -0,0 +1,2627 @@ + + + + + + + + + + + + + + + + + + + + + + + Manage your Service - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Manage your Service

    +

    SkySQL's self-service management features enable authorized accounts to launch cloud databases, start and stop cloud databases, delete cloud databases, apply database configuration changes, and configure the cloud database's IP firewall.

    +

    Self-serviceĀ user managementĀ features enable you to define role-based access for your team to jointly manage SkySQL resources.

    +

    Stop a Service

    +

    stop-service.png

    +

    Stop Service

    +

    To stop a service:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Click the MANAGE button (at right) for the desired service.
    4. +
    5. Choose the "Stop Service" menu item.
    6. +
    7. Click the "Yes, Stop this service" button to confirm this operation.
    8. +
    +

    The service will be stopped. You will only be charged for storage on a stopped service.

    +

    NotificationsĀ will be generated when this operation is initiated and when the operation is performed.

    +

    Start a Service

    +

    start-service.png

    +

    Start Service

    +

    To start a service:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Click the "MANAGE" button (at right) for the desired service.
    4. +
    5. Choose the "Start Service" menu item.
    6. +
    7. Click the "Yes, Start this service" button to confirm this operation.
    8. +
    +

    The service will be started. Service start may take 10-15 minutes. The normal billing cycle for the service will resume.

    +

    NotificationsĀ will be generated when this operation is initiated and when the operation is performed.

    +

    Scale Nodes In/Out

    +

    scale-in-out.png

    +

    Service - Horizontal Scaling

    +

    Horizontal scaling is performed by scaling nodes In (reducing node count) or Out (increasing node count).

    +

    To scale nodes horizontally:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Identify the service you want to scale. Services must be in a "Healthy" state to scale.
    4. +
    5. Click the "MANAGE" button (at right) for the desired service.
    6. +
    7. Choose the "Scale nodes in/out" menu item.
    8. +
    9. Change the node count to the desired value.
    10. +
    11. Optionally, you can check the "Auto-scale nodes horizontally" checkbox to enableĀ AutonomousĀ features for this service.
    12. +
    13. Click the "Apply Changes" button.
    14. +
    +

    The service immediately goes into scaling status.

    +

    NotificationsĀ will be generated when this operation is initiated and when the operation is performed.

    +

    Scale Nodes Up/Down

    +

    scale-up-down.png

    +

    Service - Vertical Scaling

    +

    Vertical scaling is performed by scaling nodes Up (increasing node size) or Down (decreasing node size).

    +

    To scale nodes vertically:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Identify the service you want to scale. Services must be in a "Healthy" state to scale.
    4. +
    5. Click the "MANAGE" button (at right) for the desired service.
    6. +
    7. Choose the "Scale nodes up/down" menu item.
    8. +
    9. Change the node size to the desired value.
    10. +
    11. Optionally, you can check the "Auto-scale nodes vertically" checkbox to enableĀ AutonomousĀ features for this service.
    12. +
    13. Click the "Apply Changes" button.
    14. +
    +

    The service immediately goes into scaling status.

    +

    NotificationsĀ will be generated when this operation is initiated and when the operation is performed.

    +

    Scale Storage

    +

    scale-storage.png

    +

    Service - Scale Storage

    +

    To expand block storage capacity:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Identify the service you want to scale. Services must be in a "Healthy" state to scale.
    4. +
    5. Click the "MANAGE" button (at right) for the desired service.
    6. +
    7. Choose the "Scale storage" menu item.
    8. +
    9. Use the slider to select the desired amount of storage.
    10. +
    11. Click the "Apply Changes" button.
    12. +
    +

    Storage scaling is subject to a 6-hour cooldown period.

    +

    Storage upgrades are not reversible.

    +

    Delete a Service

    +

    delete-service.png

    +

    Service - Delete

    +

    To delete a service:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Identify the service you want to delete.
    4. +
    5. Click the "MANAGE" button (at right) for that service.
    6. +
    7. Choose the "Delete Service" menu item.
    8. +
    9. Read the warning and follow the provided instructions to confirm your delete operation.
    10. +
    11. Click "Yes, delete".
    12. +
    +

    Your service and all its data will be deleted. This operation is non-reversible.

    +

    NotificationsĀ will be generated when this operation is initiated and when the operation is performed.

    +

    Other Self-Service Operations

    + + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Portal features/Notifications/index.html b/Portal features/Notifications/index.html new file mode 100644 index 00000000..d302e1db --- /dev/null +++ b/Portal features/Notifications/index.html @@ -0,0 +1,2603 @@ + + + + + + + + + + + + + + + + + + + + + + + Notifications - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Notifications

    +

    Actions performed through the Portal will generate a notification.

    +

    One notification is generated when an action is initiated.

    +

    Additional notifications are generated to convey status as the action is carried out by the system.

    +

    Access to Notifications

    +

    To access current notifications:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Click the bell icon in the upper-right corner of the interface.
    4. +
    +

    A menu of recent notifications will be displayed.

    +

    The bell icon will include a red dot indicator when a new notification is present. This indicator can be cleared by clicking the "Clear all" link.

    +

    notifications.png

    +

    Notifications

    +

    To view historical notifications, click the "View more" link at the bottom of the menu. When viewing historical notifications, notifications can be filtered by category and time frame.

    +

    notifications-all.png

    +

    Notifications - current and historical

    +

    Notification Categories

    +
      +
    • Service Alerts, which are based onĀ Alerts
    • +
    • Billing
    • +
    • Service, which are based onĀ PortalĀ actions
    • +
    • Organization
    • +
    +

    User Preferences

    +

    You can configure the notifications delivered to your email address from User Preferences.

    +

    To access User Preferences:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Click your name in the upper-right corner of the interface.
    4. +
    5. Choose Profile.
    6. +
    7. Click to expand the Notification preferences section.
    8. +
    +

    From User Preferences you can specify your notification preferences:

    +
      +
    • Whether to send notifications to you by email
    • +
    • Which Notification Categories you want to be sent
    • +
    +

    notification-preferences.png

    +

    User Preferences

    +

    Notification Channels

    +

    In addition to display in the Portal, notifications can also be delivered by email.

    +

    Notification Channels define who receives what type of notifications.

    +

    To access Notification Channel settings:

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. Click the "Settings" link in the main menu (left navigation in the Portal).
    4. +
    5. Click the "Notification Channel" button.
    6. +
    +

    notification-channels.png

    +

    Notification Channels

    +

    Add a Notification Channel

    +

    To add a Notification Channel, from the Notification Channel settings interface:

    +
      +
    1. Click the "Add" button in the upper-right corner.
    2. +
    3. Enter a channel name.
    4. +
    5. Enter the email address that will receive notifications.
    6. +
    7. Choose the notification categories that should be sent to that address.
    8. +
    +

    add-notification-channel.png

    +

    Notification Channels - Adding a Channel

    +

    Remove a Notification Channel

    +

    To remove a Notification Channel, from the Notification Channel settings interface:

    +
      +
    1. Check the checkbox to the left of the notification channel to be removed.
    2. +
    3. Click the "Delete" button (which appears when a notification channel is selected by checkbox).
    4. +
    5. Confirm removal of the notification channel by clicking the "Yes, delete" button.
    6. +
    +

    Edit a Notification Channel

    +

    To modify a Notification Channel, from the Notification Channel settings interface:

    +
      +
    1. Click the name of the channel to modify.
    2. +
    3. Make the desired changes to the channel name, email address recipient list, and notification categories.
    4. +
    5. Click the "Save" button.
    6. +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Portal features/Service Details page/index.html b/Portal features/Service Details page/index.html new file mode 100644 index 00000000..73c04c6b --- /dev/null +++ b/Portal features/Service Details page/index.html @@ -0,0 +1,2559 @@ + + + + + + + + + + + + + + + + + + + + + + + Service Details page - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Service Details page

    +

    AfterĀ service launch, a detailed summary of the service can be accessed in the Service Details interface.

    +

    Access to Service Details

    +
      +
    1. Log in to theĀ Portal.
    2. +
    3. From the Dashboard, click the name of the desired service.
    4. +
    5. Click the "Details" tab.
    6. +
    +

    service-details.png

    +

    Service Details

    +

    Available Information

    +

    Service details vary based on topology.

    +

    Service details may include:

    +
      +
    • Hardware architecture
    • +
    • Instance size
    • +
    • Software version
    • +
    • Timestamp of service launch
    • +
    • Storage capacity
    • +
    • Count of replicas
    • +
    • Read-only TCP port
    • +
    • Read-write TCP port
    • +
    • NoSQL interface TCP port
    • +
    • IP address used for outbound traffic
    • +
    • Auto-scaling settings for nodes
    • +
    • Auto-scaling settings for storage
    • +
    • Fully Qualified Domain Name (hostname)
    • +
    • Configuration settings applied to the service
    • +
    • Current charges and hourly costs
    • +
    • Scheduled maintenance window
    • +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Portal features/Service Monitoring Panels/index.html b/Portal features/Service Monitoring Panels/index.html new file mode 100644 index 00000000..0faccb07 --- /dev/null +++ b/Portal features/Service Monitoring Panels/index.html @@ -0,0 +1,4174 @@ + + + + + + + + + + + + + + + + + + + + + + + Service Monitoring Panels - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Service Monitoring Panels

    +

    The available panels are:

    +

    monitoring.png +Monitoring page

    +

    Current SQL Commands (service)

    +

    This panel shows the ratio between the types of SQL statements executed by the service during the selected time interval.

    +

    CPU Load (service,status)

    +

    This panel shows the CPU usage for each server node during the selected time interval.

    +

    QPS (service,status)

    +

    This panel shows the queries per second (QPS) executed by the server node during the selected time interval.

    +

    Connections (service)

    +

    This panel shows the number of used and aborted connections for each ES node along with the max_connections value.

    +

    Replicas status

    +

    This panel shows summarized values for certain replication-related metadata to help determine if any replica ES nodes encountered replication issues during the selected time interval.

    +

    Replicas lags

    +

    This panel shows average values for certain replication-related metadata to help determine if the replica ES nodes are currently lagging behind the primary ES node.

    +

    Disk Size of Data

    +

    This panel shows the amount of storage space used (as the usage percentage, actual size, and total size) by each server node.

    +

    Disk Size of Logs

    +

    This panel shows the amount of storage space used for logs by each server node during the selected time interval.

    +

    lags-panel.png

    +

    GTID Replication Position (service,replicas)

    +

    This panel shows the Global Transaction ID (GTID) for each ES node during the selected time interval.

    +

    Seconds Behind Primary

    +

    This panel shows the average number of seconds that the replica ES nodes lagged behind the primary ES node during the selected time interval.

    +

    Exec Primary Log Position

    +

    This panel shows the current binary log position of the replica SQL thread for each ES node during the selected time interval.

    +

    Read Primary Log Position

    +

    This panel shows the current binary log position of the replica I/O thread for each ES node during the selected time interval.

    +

    queries-panel.png

    +

    Top Command Counters (service,queries)

    +

    This panel shows the top 30 statement types that were most frequently executed by all ES nodes during the selected time interval.

    +

    Top Command Counters (server)

    +

    This panel shows the top 30 statement types that were most frequently executed by the ES node during the selected time interval.

    +

    Top Command Counters Hourly (service)

    +

    This panel shows the top 30 statement types that were most frequently executed by all ES and Xpand nodes in 1 hour intervals over the past 24 hours.

    +

    Top Command Counters Hourly (server)

    +

    This panel shows the top 30 statement types that were most frequently executed by the ES node in 1 hour intervals over the past 24 hours.

    +

    MariaDB QPS

    +

    This panel shows the number of queries per second (QPS) executed by all ES nodes during the selected time interval.

    +

    MariaDB Slow Queries (service,queries)

    +

    This panel shows the number of slow queries executed by all ES nodes during the selected time interval.

    +

    MariaDB Slow Queries (server)

    +

    This panel shows the number of slow queries executed by the ES node during the selected time interval.

    +

    MariaDB QPS and Questions

    +

    This panel shows the number of queries and questions per second executed by the ES node during the selected time interval.

    +

    MariaDB Client Thread Activity (service)

    +

    This panel shows the number of client threads running on all ES nodes during the selected time interval.

    +

    MariaDB Client Thread Activity (server)

    +

    This panel shows the number of client threads connected and running on the ES node during the selected time interval.

    +

    database-panel.png

    +

    MaxScale Service Connections

    +

    This panel shows the number of clients connected to all MaxScale nodes during the selected time interval.

    +

    MaxScale Server Connections

    +

    This panel shows the number of client connections open between the MaxScale node and each ES node during the selected time interval.

    +

    MariaDB Service Connections

    +

    This panel shows the number of clients connected to the ES node during the selected time interval.

    +

    MariaDB Aborted Connections

    +

    This panel shows the number of connections aborted by the ES node during the selected time interval.

    +

    MariaDB Table Locks (service)

    +

    This panel shows the number of table locks requested by all ES nodes during the selected time interval.

    +

    MariaDB Table Locks (server)

    +

    This panel shows the number of table locks requested by the ES node during the selected time interval.

    +

    MariaDB Open Tables (service)

    +

    This panel shows the number of tables opened by the database servers on all ES nodes during the selected time interval.

    +

    MariaDB Open Tables (server)

    +

    This panel shows the number of tables opened by the database server on the ES node during the selected time interval.

    +

    MariaDB Table Opened

    +

    This panel shows the number of tables that have been opened by all ES nodes during the selected time interval.

    +

    system-panel.png

    +

    CPU Load

    +

    This panel shows the CPU usage for each server node during the selected time interval.

    +

    Memory Usage

    +

    This panel shows memory usage details for all ES nodes during the selected time interval.

    +

    I/O Activity - Page In

    +

    This panel shows the total number of bytes read from the ES node's file system during the selected time interval.

    +

    I/O Activity - Page Out

    +

    This panel shows the total number of bytes written to the ES node's file system during the selected time interval.

    +

    I/O Activity (server,system)

    +

    This panel shows the total number of bytes written to or read from the ES node's file system during the selected time interval.

    +

    IOPS

    +

    This panel shows the number of input/output operations per second performed by the ES node during the selected time interval.

    +

    IOPS - Page In

    +

    This panel shows the total number of reads performed from the ES node's file system during the selected time interval.

    +

    IOPS - Page Out

    +

    This panel shows the total number of writes performed to the ES node's file system during the selected time interval.

    +

    Network Traffic - Inbound

    +

    This panel shows the amount of data received over the network by the operating systems on all ES nodes during the selected time interval.

    +

    Network Traffic - Outbound

    +

    This panel shows the amount of data sent over the network by the operating systems on all ES nodes during the selected time interval.

    +

    MariaDB Network Traffic (service)

    +

    This panel shows the amount of data sent and received over the network by the database servers on all ES nodes during the selected time interval.

    +

    MariaDB Network Traffic (server)

    +

    This panel shows the amount of data sent and received over the network by the database server on the ES node during the selected time interval.

    +

    Network Traffic (server,status)

    +

    This panel shows the amount of data sent and received over the network by the operating system on the ES node during the selected time interval.

    +

    MariaDB Network Usage Hourly (service)

    +

    This panel shows the amount of data sent and received over the network per hour by the database servers on all ES nodes over the past 24 hours.

    +

    MariaDB Network Usage Hourly (server)

    +

    This panel shows the amount of data sent and received over the network per hour by the database server on the ES node over the past 24 hours.

    +

    Network Errors (service)

    +

    This panel shows the number of network errors encountered by all ES nodes during the selected time interval.

    +

    Network Errors (server)

    +

    This panel shows the number of network errors encountered by the ES node during the selected time interval.

    +

    Network Packets Dropped (service)

    +

    This panel shows the number of network packets dropped by all ES nodes during the selected time interval.

    +

    Network Packets Dropped (server)

    +

    This panel shows the number of network packets dropped by the ES node during the selected time interval.

    +

    CPU (server,status,gauge)

    +

    This panel shows the current CPU usage for the ES or Xpand node.

    +

    RAM (server,status)

    +

    This panel shows the current memory usage details for the ES or Xpand node.

    +

    RAM (server,status,graph)

    +

    This panel shows memory usage details for the ES or Xpand node during the selected time interval.

    +

    Buffer Pool Size of Total RAM

    +

    This panel shows the current size of the InnoDB buffer pool for the ES node in two units: the absolute size and the percentage of the server's usable memory.

    +

    Used Connections

    +

    This panel shows the current number of client connections as a percentage of the ES node's max_connections value.

    +

    InnoDB Data / sec (server,status)

    +

    This panel shows the number of bytes per second read and written by InnoDB during the selected time interval.

    +

    Rows / sec

    +

    This panel shows the total number of rows written and read per second by the ES node during the selected time interval.

    +

    MariaDB Connections

    +

    This panel shows the number of client connections to the ES node during the selected time interval.

    +

    MariaDB Opened Files / sec

    +

    This panel shows the number of files opened per second by the database server on the ES node during the selected time interval.

    +

    MariaDB Open Files

    +

    This panel shows the number of files opened by the database server on the ES node during the selected time interval.

    +

    MariaDB Transaction Handlers / sec

    +

    This panel shows the number of transaction-related handlers created by the ES node during the selected time interval.

    +

    Temporary Objects Created

    +

    This panel shows the number of temporary tables created by the ES node during the selected time interval.

    +

    MariaDB Thread Cache

    +

    This panel shows the number of threads created and cached for re-use on the ES node during the selected time interval.

    +

    MariaDB Table Open Cache Status

    +

    This panel shows the activity of the table open cache on the ES node during the selected time interval.

    +

    MariaDB Table Definition Cache

    +

    This panel shows how many table definitions were cached by the ES node during the selected time interval.

    +

    Memory Distribution

    +

    This panel shows memory usage details for the ES node during the selected time interval.

    +

    MariaDB Memory Overview

    +

    This panel shows how much memory the ES node used for the InnoDB buffer pool, InnoDB log buffer, MyISAM key buffer, and query cache during the selected time interval.

    +

    Memory (server,performance)

    +

    This panel shows memory usage details for the MaxScale node during the selected time interval.

    +

    RW/sec (server,cluster)

    +

    This panel shows the number of read and write operations per second that were handled by the threads on the MaxScale node during the selected time interval.

    +

    Threads

    +

    This panel shows the number of threads currently used by the MaxScale node.

    +

    MaxScale Modules

    +

    This panel lists the modules installed on the MaxScale node.

    +

    MaxScale Hangups (server,performance)

    +

    This panel shows the number of client connections closed by the MaxScale node during the selected time interval.

    +

    Errors (server,performance)

    +

    This panel shows the number of errors encountered by threads on the MaxScale node during the selected time interval.

    +

    Event Queue Length (server,performance)

    +

    This panel shows the total event queue length for all threads on the MaxScale node during the selected time interval.

    +

    MaxScale Descriptors (server,cluster)

    +

    This panel shows the number of descriptors used by the MaxScale node during the selected time interval.

    +

    Max Time in Queue (server,cluster)

    +

    This panel shows the longest time the MaxScale node waited for an I/O event during the selected time interval.

    +

    MaxScale Connections

    +

    This panel shows the number of clients connected to the MaxScale node during the selected time interval.

    +

    Database Server Connections

    +

    This panel shows the number of database server connections open between the MaxScale node and each ES or Xpand node during the selected time interval.

    +

    Resident (server,cluster)

    +

    This panel shows the current resident set size (RSS) of the MaxScale process.

    +

    Stack size (server,cluster)

    +

    This panel shows the current stack size of the MaxScale node.

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Portal features/add-notification-channel.png b/Portal features/add-notification-channel.png new file mode 100644 index 00000000..2061f487 Binary files /dev/null and b/Portal features/add-notification-channel.png differ diff --git a/Portal features/current-usage.png b/Portal features/current-usage.png new file mode 100644 index 00000000..1d7424d1 Binary files /dev/null and b/Portal features/current-usage.png differ diff --git a/Portal features/dashboard.png b/Portal features/dashboard.png new file mode 100644 index 00000000..dfd775f7 Binary files /dev/null and b/Portal features/dashboard.png differ diff --git a/Portal features/database-panel.png b/Portal features/database-panel.png new file mode 100644 index 00000000..b8622844 Binary files /dev/null and b/Portal features/database-panel.png differ diff --git a/Portal features/delete-service.png b/Portal features/delete-service.png new file mode 100644 index 00000000..50e17406 Binary files /dev/null and b/Portal features/delete-service.png differ diff --git a/Portal features/index.html b/Portal features/index.html new file mode 100644 index 00000000..6c77afee --- /dev/null +++ b/Portal features/index.html @@ -0,0 +1,2717 @@ + + + + + + + + + + + + + + + + + + + + + + + Portal features - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    + +
    + + + +
    +
    + + + + +

    Portal features

    +

    From the SkySQL Portal, you can launch, monitor, and manage your SkySQL services.

    +

    Access the Portal

    +

    You can access the Portal here

    +

    Dashboard

    +

    dashboard

    +

    From the Dashboard, you can see a list of your SkySQL services and status information for each service.

    +

    From a different view, the Dashboard can be accessed by clicking the "Dashboard" link in the main menu (left navigation in the Portal).

    +

    Launch

    +

    To launch a new service, click the "+ Launch New Service" button on the Dashboard.

    +

    See "Service Launch" for details on the service launch process and launch-time selections.

    +

    Service-Specific Interfaces

    +

    Service-specific interfaces are available from the Dashboard by clicking on the service name for the desired service.

    +

    Service-specific interfaces will vary by topology.

    +

    Service-specific interfaces are provided to:

    + +

    Connect

    +

    From the Dashboard, the details needed to connect to your SkySQL service can be seen by clicking on the "CONNECT" button for the desired service.

    +

    See "Client Connections" for details on how to connect to a service.

    +

    Manage

    +

    From the Dashboard, the "MANAGE" button for a service provides access to:

    + +

    Billing

    +

    The Dashboard includes a Spending gauge to indicate current charges. More detailed billing information can be accessed by clicking on the Spending gauge.

    +

    Alternatively, you can access detailed billing and invoice information by clicking on your name in the upper-right corner of the interface, then select "Billing" from the menu.

    +

    See "Billing" for additional details.

    +

    Monitoring

    +

    The Dashboard includes monitoring gauges for Current SQL Commands, CPU Load, and QPS (Queries Per Second). More detailed monitoring can be accessed by clicking on one of these gauges.

    +

    Alternatively, you can access detailed server and service monitoring by clicking on the service name from the Dashboard, then accessing the Monitoring tab (the default view).

    +

    See "Monitoring" for additional details.

    +

    Alerts

    +

    The Dashboard includes the count of active monitoring alerts for your service. More detailed alert information can be accessed by clicking on the Alerts gauge.

    +

    Alternatively, you can access monitoring alerts by clicking the "Alerts" link in the main menu (left navigation in the Portal).

    +

    Logs

    +

    Server log files can be accessed by clicking the "Logs" link in the main menu (left navigation in the Portal).

    +

    Settings

    +

    These settings can be accessed by clicking the "Settings" link in the main menu (left navigation in the Portal):

    + +

    Notifications

    +

    Actions performed through the Portal will generate a notification.

    +

    To view current notifications, click the bell icon in the upper-right corner of the interface.

    +

    See "Notifications" for additional details.

    +

    User Preferences

    +

    To customize your email notification preferences, click your name in the upper-right corner of the interface, then choose "User preferences".

    +

    See "Notifications" for additional details.

    +

    Logout

    +

    To log out from SkySQL, click your name in the upper-right corner of the interface, then choose "Logout" from the menu.

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Portal features/lags-panel.png b/Portal features/lags-panel.png new file mode 100644 index 00000000..d2fc8a7c Binary files /dev/null and b/Portal features/lags-panel.png differ diff --git a/Portal features/launch.png b/Portal features/launch.png new file mode 100644 index 00000000..69fded4a Binary files /dev/null and b/Portal features/launch.png differ diff --git a/Portal features/monitoring.png b/Portal features/monitoring.png new file mode 100644 index 00000000..335b8fe1 Binary files /dev/null and b/Portal features/monitoring.png differ diff --git a/Portal features/notification-channels.png b/Portal features/notification-channels.png new file mode 100644 index 00000000..17221722 Binary files /dev/null and b/Portal features/notification-channels.png differ diff --git a/Portal features/notification-preferences.png b/Portal features/notification-preferences.png new file mode 100644 index 00000000..6d381678 Binary files /dev/null and b/Portal features/notification-preferences.png differ diff --git a/Portal features/notifications-all.png b/Portal features/notifications-all.png new file mode 100644 index 00000000..e466c945 Binary files /dev/null and b/Portal features/notifications-all.png differ diff --git a/Portal features/notifications.png b/Portal features/notifications.png new file mode 100644 index 00000000..f1d17fce Binary files /dev/null and b/Portal features/notifications.png differ diff --git a/Portal features/queries-panel.png b/Portal features/queries-panel.png new file mode 100644 index 00000000..297e243e Binary files /dev/null and b/Portal features/queries-panel.png differ diff --git a/Portal features/scale-in-out.png b/Portal features/scale-in-out.png new file mode 100644 index 00000000..465a3c54 Binary files /dev/null and b/Portal features/scale-in-out.png differ diff --git a/Portal features/scale-storage.png b/Portal features/scale-storage.png new file mode 100644 index 00000000..292866aa Binary files /dev/null and b/Portal features/scale-storage.png differ diff --git a/Portal features/scale-up-down.png b/Portal features/scale-up-down.png new file mode 100644 index 00000000..c230e24b Binary files /dev/null and b/Portal features/scale-up-down.png differ diff --git a/Portal features/service-details.png b/Portal features/service-details.png new file mode 100644 index 00000000..ba2f2a43 Binary files /dev/null and b/Portal features/service-details.png differ diff --git a/Portal features/start-service.png b/Portal features/start-service.png new file mode 100644 index 00000000..8486b755 Binary files /dev/null and b/Portal features/start-service.png differ diff --git a/Portal features/stop-service.png b/Portal features/stop-service.png new file mode 100644 index 00000000..e2189074 Binary files /dev/null and b/Portal features/stop-service.png differ diff --git a/Portal features/system-panel.png b/Portal features/system-panel.png new file mode 100644 index 00000000..77fb1cd4 Binary files /dev/null and b/Portal features/system-panel.png differ diff --git a/Quickstart/Launch DB using the REST API/index.html b/Quickstart/Launch DB using the REST API/index.html new file mode 100644 index 00000000..e7ab5faa --- /dev/null +++ b/Quickstart/Launch DB using the REST API/index.html @@ -0,0 +1,2952 @@ + + + + + + + + + + + + + + + + + + + + + + + Launch DB using the REST API - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    Launch DB using the REST API

    +

    This walkthrough explains how to launch database services and manage the lifecycle of database services using the SkySQL DBaaS REST API.

    +

    Launch a Service

    +

    Step 1: Generate API Key

    +
      +
    1. +

      Go to SkySQL API Key management page and generate an API key

      +
    2. +
    3. +

      Export the value from the token field to an environment variable $API_KEY

      +
      export API_KEY='... key data ...'
      +
      +

      TheĀ API_KEYĀ environment variable will be used in the subsequent steps.

      +
    4. +
    +

    Use it on subsequent requests, e.g.:

     curl --request GET 'https://api.skysql.com/provisioning/v1/services' \
    +    --header "X-API-Key: $API_KEY"
    +

    +
    +

    Note

    +

    You can use the Swagger docs site we host to try out the API, or follow the instructions below to try the API using your command shell.

    +
    +

    Alternative: Use the Swagger docs to try out the APIs

    +

    You can use the API Documentation here and directly try out the APIs in your browser.

    +

    All you need to do is click 'Authorize' and type in <supply your API key here>.

    +
    +

    Note

    +

    ** Prerequisites for the code below **

    +

    The examples below useĀ curlĀ as the REST client.Ā curlĀ is available for Linux, macOS, and MS Windows. Of course, you can use any language client that supports invoking REST over HTTP. +Examples below also use jq, a JSON parsing utility.Ā jqĀ is available for Linux, macOS, and MS Windows. Install jq then proceed.

    +

    The examples also make use ofĀ teeĀ to save the response JSON data to a file while also allowing it to be piped toĀ jqĀ for output. Both Linux and macOS supportĀ teeĀ as described in the examples. On MS Windows, Powershell has aĀ teeĀ command that requires theĀ -filepathĀ option to be inserted prior to the filename.

    +

    TheĀ chmodĀ command is used to make a file private to the current user. If your environment doesn't supportĀ chmod, you can set the file's permissions using other means.

    +

    The examples also make use of exported variables andĀ ${VARIABLE_NAME}Ā variable references that are compatible with Bourne-like shells (such asĀ sh,Ā bash, andĀ zsh). On MS Windows, you will need to adapt these instructions if you are not using a Bourne-like shell. For example, you can copy just theĀ jqĀ part of an export command (from inside the backticks), run that on its own, and then copy/paste the resulting string into a variable assignment for your shell.

    +

    Finally, the examples use a backslash at the end of some of the lines to indicate to the shell that a command spans multiple lines. If your shell doesn't allow this, remove each trailing backslash character and join the following line to the end of the current line.

    +
    +

    Step 2: Determine the Client IP Address

    +

    When your new service is created, your client can only connect through the service's firewall if the client IP address is in the service's IP allowlist.

    +

    Before creating the new service, determine the public IP address of your client host and save it to theĀ SKYSQL_CLIENT_IPĀ environment variable.

    +

    If you are not sure of your public IP address, you can use a lookup service, such asĀ checkip.amazonaws.com:

    +
    export SKYSQL_CLIENT_IP=`curl -sS checkip.amazonaws.com`
    +
    +

    Step 3: Launch a Service

    +

    To launch a service:

    +
      +
    1. Prepare a request body containing the desired service options in a file calledĀ request-service.json:
    2. +
    +
    cat > request-service.json <<EOF
    +{
    +  "service_type": "transactional",
    +  "topology": "es-single",
    +  "provider": "gcp",
    +  "region": "us-central1",
    +  "architecture": "amd64",
    +  "size": "sky-2x8",
    +  "storage": 100,
    +  "nodes": 1,
    +  "name": "skysql-quickstart",
    +  "ssl_enabled": true,
    +  "allow_list": [
    +     {
    +        "comment": "Describe the IP address",
    +        "ip": "${SKYSQL_CLIENT_IP}/32"
    +     }
    +  ]
    +}
    +EOF
    +
    +

    This configuration is suitable for a quick test, but a more customized configuration should be selected for performance testing or for alignment to the needs of production workloads:

    + +
    curl -sS --location --request POST \
    +   --header "X-API-Key: ${API_KEY}" \
    +   --header "Accept: application/json" \
    +   --header "Content-type: application/json" \
    +   --data '@request-service.json' \
    +   https://api.skysql.com/provisioning/v1/services \
    +   | tee response-service.json | jq .
    +
    +

    Upon success, the command will return JSON with details about the new service.

    +
      +
    1. +

      Read the service ID for the new service and save the value in theĀ SKYSQL_SERVICEĀ environment variable:

      +
      $ export SKYSQL_SERVICE=`jq -r .id response-service.json`
      +
      +
    2. +
    +

    Step 4: Check Service State

    +

    Before advancing, check the service state using theĀ /provisioning/v1/services/${SKYSQL_SERVICE}Ā API endpoint:

    +
    curl -sS --location --request GET \
    +   --header "X-API-Key: ${API_KEY}" \
    +   --header "Accept: application/json" \
    +   https://api.skysql.com/provisioning/v1/services/${SKYSQL_SERVICE} \
    +   | tee response-state.json | jq .status
    +
    +

    When the service is still being launched, the JSON payload will containĀ "pending_create"Ā orĀ "pending_modifying"Ā as the service status.

    +

    When the service has been launched, the JSON payload containsĀ "ready", and you can continue with the next steps. Keep in mind that some of the following values will not be populated in the JSON data until this ready status has been achieved.
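    If you prefer to script the wait, a minimal polling loop such as the following (assuming the same API_KEY and SKYSQL_SERVICE environment variables and the jq utility used above) blocks until the service reports "ready":

# Poll the service status every 30 seconds until it reports "ready".
while true; do
  STATUS=$(curl -sS --location --request GET \
     --header "X-API-Key: ${API_KEY}" \
     --header "Accept: application/json" \
     https://api.skysql.com/provisioning/v1/services/${SKYSQL_SERVICE} \
     | tee response-state.json | jq -r .status)
  echo "Service status: ${STATUS}"
  [ "${STATUS}" = "ready" ] && break
  sleep 30
done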

    +

    Step 5: Obtain Connection Details

    +

    Obtain the connection credentials for the new SkySQL service by executing the following commands:

    +
      +
    1. +

      Obtain the hostname and port of the service and save them to theĀ SKYSQL_FQDNĀ andĀ SKYSQL_PORTĀ environment variables:

      +
        +
      • +

        The hostname is specified with theĀ "fqdn"Ā key.

        +
        export SKYSQL_FQDN=`jq -r .fqdn response-state.json`
        +
        +
      • +
      • +

        Available TCP ports are specified in theĀ "endpoints"Ā array. For this test, connect to theĀ "port"Ā whereĀ "name"Ā isĀ "readwrite".

        +
        export SKYSQL_PORT=`jq '.endpoints[0].ports[] | select(.name=="readwrite") | .port' response-state.json`
        +
        +
      • +
      +
    2. +
    3. +

      Obtain the default username and password for the service using theĀ /provisioning/v1/services/${SKYSQL_SERVICE}/security/credentialsĀ API endpointĀ and save the response to theĀ response-credentials.jsonĀ file:

      +
    4. +
    +
    curl -sS --location --request GET \
    +   --header "X-API-Key: ${API_KEY}" \
    +   --header "Accept: application/json" \
    +   --header "Content-type: application/json" \
    +   https://api.skysql.com/provisioning/v1/services/${SKYSQL_SERVICE}/security/credentials \
    +   | tee response-credentials.json | jq .
    +
    +

    The default username and password will not be available until the service state isĀ "ready".

    +
      +
    1. +

      Set the file's mode to only allow the current user to read its contents:

      +
      $ chmod 600 response-credentials.json
      +
      +
    2. +
    3. +

      Read the username and password fromĀ response-credentials.jsonĀ and save them to theĀ SKYSQL_USERNAMEĀ andĀ SKYSQL_PASSWORDĀ environment variables:

      +
      $ export SKYSQL_USERNAME=`jq -r .username response-credentials.json`
      +$ export SKYSQL_PASSWORD=`jq -r .password response-credentials.json`
      +
      +
    4. +
    +

    Step 6: Connect

    +

    Connect to the database using the host, port, and default credentials using theĀ mariadbĀ client:

    +
    mariadb --host ${SKYSQL_FQDN} --port ${SKYSQL_PORT} \
    +   --user ${SKYSQL_USERNAME} --password="${SKYSQL_PASSWORD}" \
    +   --ssl-verify-server-cert 
    +
    +

    If you don't want the password to appear on the command-line, specify theĀ --passwordĀ command-line optionĀ without an argument to be prompted for a password.

    +

    Step 7: Save Connection Information (Optional)

    +

    To connect to your SkySQL service easily, it is possible to create aĀ .my.cnfĀ file in your home directory that contains all the details of your connection.

    +
      +
    1. Use the following command to create a new .my.cnf file, or overwrite an existing one, and populate it with the connection information that was collected in the previous steps:
    2. +
    +
    cat > ~/.my.cnf <<EOF
    +[client]
    +host=${SKYSQL_FQDN}
    +port=${SKYSQL_PORT}
    +user=${SKYSQL_USERNAME}
    +password="${SKYSQL_PASSWORD}"
    +EOF
    +
    +
      +
    1. +

      Set the file system permissions for theĀ .my.cnfĀ file to ensure that other users can't read it:

      +
      $ chmod 600 ~/.my.cnf
      +
      +
    2. +
    3. +

      When all the connection parameters are in yourĀ ~/.my.cnfĀ file, theĀ mariadbĀ clientĀ can connect without specifying any command-line options:

      +
      $ mariadb
      +
      +
    4. +
    +

    Resources

    + + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Quickstart/Launch DB using the Terraform Provider/index.html b/Quickstart/Launch DB using the Terraform Provider/index.html new file mode 100644 index 00000000..7c285628 --- /dev/null +++ b/Quickstart/Launch DB using the Terraform Provider/index.html @@ -0,0 +1,3426 @@ + + + + + + + + + + + + + + + + + + + + + + + Launch DB using the Terraform Provider - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    Launch DB using the Terraform Provider

    +

    This walkthrough explains how to launch database services and manage the lifecycle of database services using the Terraform provider.

    +

    For users who prefer other interfaces, SkySQL offers the following alternatives:

    +
      +
    • Use theĀ PortalĀ in a web browser
    • +
    • Use theĀ DBaaS APIĀ with a REST client
    • +
    +

    This walkthrough demonstrates a service configuration that is suitable for a quick test. A more customized configuration should be selected for performance testing or for alignment to the needs of production workloads.

    +
    +

    Note

    +

    This procedure uses Terraform. HashiCorp officially supports Terraform on several Linux distributions, but HashiCorp also provides binaries for Microsoft Windows, macOS, and other operating systems.

    +

    For a list of operating systems that are officially supported for Terraform, see "HashiCorp Terraform Documentation: Supported Operating Systems".

    +

    For a list of operating systems that have binaries available for Terraform, see "HashiCorp Terraform Documentation: Install Terraform".

    +
    +

    Dependencies

    +
      +
    • This procedure requires Terraform to be installed. For information about how to install Terraform, see "HashiCorp Terraform Documentation: Install Terraform".
    • +
    • The examples in this procedure also useĀ jq, a JSON parsing utility.Ā jqĀ is available for Linux, macOS, and MS Windows. InstallĀ jqĀ then proceed.
    • +
    • The examples in this procedure also useĀ curl, a data transfer utility.Ā curlĀ is available for Linux, macOS, and MS Windows. InstallĀ curlĀ then proceed.
    • +
    • The examples in this procedure also useĀ wget, a file download utility.Ā GNU WgetĀ is available for Linux, macOS, and MS Windows. InstallĀ wgetĀ then proceed.
    • +
    • The examples in this procedure also use exported environment variables that are compatible with Bourne-like shells (such asĀ sh,Ā bash, andĀ zsh).
    • +
    +

    Launch a Service

    +

    Step 1: Generate API Key

    +
      +
    1. Go to theĀ Generate API KeyĀ page.
    2. +
    3. Fill in a name for the API key.
    4. +
    5. Click the "Create" button.
    6. +
    7. Click the copy button to copy the API key.
    8. +
    9. Store the API key somewhere safe, as it is shown only once at creation time; the SkySQL platform does not store it anywhere. One way to pass it to Terraform is shown in the sketch after this list.
    10. +
    +
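    One convenient way to supply the key to Terraform without writing it into a .tf file is Terraform's standard TF_VAR_ environment variable convention, which maps onto the api_key input variable defined in variables.tf below (a sketch; substitute your own key value):

# Terraform reads TF_VAR_<name> environment variables as input variables,
# so this populates var.api_key without storing the key on disk.
export TF_VAR_api_key='... key data ...'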

    Step 2: Create Terraform Project Directory

    +

    Create a directory for your Terraform project and change to the directory:

    +
    mkdir -p ~/skysql-nr-tf
    +cd ~/skysql-nr-tf
    +
    +

    Step 3: CreateĀ main.tf

    +

    In the Terraform project directory, create aĀ main.tfĀ file that contains the following:

    + +
    # ---------------------
    +# Provider Requirements
    +# ---------------------
    +# TF Documentation: https://developer.hashicorp.com/terraform/language/providers/requirements
    +
    +terraform {
    +  required_providers {
    +    skysql = {
    +      source          = "registry.terraform.io/skysqlinc/skysql"
    +    }
    +  }
    +}
    +
    +# ----------------------
    +# Provider Configuration
    +# ----------------------
    +# TF Documentation: https://developer.hashicorp.com/terraform/language/providers/configuration
    +
    +provider "skysql" {
    +   access_token       = var.api_key
    +}
    +
    +# ---------
    +# Resources
    +# ---------
    +# TF Documentation: https://developer.hashicorp.com/terraform/language/resources/syntax
    +
    +# Create a service
    +resource "skysql_service" "default" {
    +  service_type        = var.service_type
    +  topology            = var.topology
    +  cloud_provider      = var.cloud_provider
    +  region              = var.region
    +  availability_zone   = coalesce(var.availability_zone, data.skysql_availability_zones.default.zones[0].name)
    +  architecture        = var.architecture
    +  size                = var.size
    +  storage             = var.storage
    +  nodes               = var.nodes
    +  version             = coalesce(var.sw_version, data.skysql_versions.default.versions[0].name)
    +  name                = var.name
    +  ssl_enabled         = var.ssl_enabled
    +  deletion_protection = var.deletion_protection
    +  wait_for_creation   = true
    +  wait_for_deletion   = true
    +  wait_for_update     = true
    +  is_active           = true
    +  allow_list          = [
    +     {
    +        "ip"          : var.ip_address,
    +        "comment"     : var.ip_address_comment
    +     }
    +  ]
    +}
    +
    +# ------------
    +# Data Sources
    +# ------------
    +# TF Documentation: https://developer.hashicorp.com/terraform/language/data-sources
    +
    +# Retrieve the list of projects. Projects are a way to group services.
    +data "skysql_projects" "default" {}
    +
    +# Retrieve the list of available versions for a specific topology
    +data "skysql_versions" "default" {
    +  topology            = var.topology
    +}
    +
    +# Retrieve the service details
    +data "skysql_service" "default" {
    +  service_id          = skysql_service.default.id
    +}
    +
    +# Retrieve the service default credentials.
    +# When the service is created please change the default credentials
    +data "skysql_credentials" "default" {
    +  service_id          = skysql_service.default.id
    +}
    +
    +data "skysql_availability_zones" "default" {
    +  region              = var.region
    +  filter_by_provider  = var.cloud_provider
    +}
    +
    +

    Step 4: Create outputs.tf

    +

    In the Terraform project directory, create an outputs.tf file that contains the output values used to display metadata about the SkySQL service:

    +
    # -------------
    +# Output Values
    +# -------------
    +# TF Documentation: https://developer.hashicorp.com/terraform/language/values/outputs
    +
    +output "skysql_projects" {
    +  value = data.skysql_projects.default
    +}
    +
    +# Show the service details
    +output "skysql_service" {
    +  value = data.skysql_service.default
    +}
    +
    +# Show the service credentials
    +output "skysql_credentials" {
    +  value     = data.skysql_credentials.default
    +  sensitive = true
    +}
    +
    +# Example how you can generate a command line for the database connection
    +output "skysql_cmd" {
    +  value = "mariadb --host ${data.skysql_service.default.fqdn} --port 3306 --user ${data.skysql_service.default.service_id} -p --ssl-verify-server-cert"
    +}
    +
    +output "availability_zones" {
    +  value = data.skysql_availability_zones.default
    +}
    +
    +

    Step 5: Create variables.tf

    +

    In the Terraform project directory, create a variables.tf file that contains the input variables used to configure the SkySQL service:

    +
    # ---------------
    +# Input Variables
    +# ---------------
    +# TF Documentation: https://developer.hashicorp.com/terraform/language/values/variables
    +
    +variable "api_key" {
    +   type                 = string
    +   sensitive            = true
    +   description          = "The SkySQL API Key generated at: https://app.skysql.com/user-profile/api-keys"
    +}
    +
    +variable "service_type" {
    +   type                 = string
    +   default              = "transactional"
    +   description          = "Specify \"transactional\" or \"analytical\". For additional information, see: https://apidocs.skysql.com/#/Offering/get_provisioning_v1_service_types"
    +}
    +
    +variable "topology" {
    +   type                 = string
    +   default              = "es-single"
    +   description          = "Specify a topology. For additional information, see: https://apidocs.skysql.com/#/Offering/get_provisioning_v1_topologies"
    +}
    +
    +variable "cloud_provider" {
    +    type                 = string
    +    default              = "gcp"
    +    description          = "Specify the cloud provider. For additional information, see: https://apidocs.skysql.com/#/Offering/get_provisioning_v1_providers"
    +}
    +
    +variable "region" {
    +   type                 = string
    +   default              = "us-central1"
    +   description          = "Specify the region. For additional information, see: https://apidocs.skysql.com/#/Offering/get_provisioning_v1_regions"
    +}
    +
    +variable "availability_zone" {
    +   type                 = string
    +   default              = null
    +   description          = "Specify the availability zone for the cloud provider and region. For additional information, see: https://apidocs.skysql.com/#/Offering/get_provisioning_v1_providers__provider_name__zones"
    +}
    +
    +variable "architecture" {
    +   type                 = string
    +   default              = "amd64"
    +   description          = "Specify a hardware architecture. For additional information, see: https://apidocs.skysql.com/#/CPU-Architectures/get_provisioning_v1_cpu_architectures"
    +}
    +
    +variable "size" {
    +   type                 = string
    +   default              = "sky-2x8"
    +   description          = "Specify the database node instance size. For additional information, see: https://apidocs.skysql.com/#/Offering/get_provisioning_v1_sizes"
    +}
    +
    +variable "storage" {
    +   type                 = number
    +   default              = 100
    +   description          = "Specify a transactional storage size. For additional information, see: https://apidocs.skysql.com/#/Offering/get_provisioning_v1_topologies__topology_name__storage_sizes"
    +}
    +
    +variable "nodes" {
    +   type                 = number
    +   default              = 1
    +   description          = "Specify a node count. For additional information, see: https://apidocs.skysql.com/#/Offering/get_provisioning_v1_topologies__topology_name__nodes"
    +}
    +
    +variable "sw_version" {
    +   type                 = string
    +   default              = null
    +   description          = "Specify a software version. For additional information, see: https://apidocs.skysql.com/#/Offering/get_provisioning_v1_versions"
    +}
    +
    +variable "name" {
    +   type                 = string
    +   default              = "skysql-quickstart"
    +   description          = "Specify a name for the service, 4-24 characters in length."
    +}
    +
    +variable "ssl_enabled" {
    +   type                 = bool
    +   default              = true
    +   description          = "Specify whether TLS should be enabled for the service."
    +}
    +
    +variable "deletion_protection" {
    +   type                 = bool
    +   default              = true
    +   description          = "Specify whether the service can be deleted via Terraform (false) or whether trying to do so raises an error (true)"
    +}
    +
    +variable "ip_address" {
    +   type                 = string
    +   description          = "Specify an IP address in CIDR format to add to the service's IP allowlist."
    +}
    +
    +variable "ip_address_comment" {
    +   type                 = string
    +   description          = "Specify a comment describing the IP address."
    +}
    +
    +

    The variables are configured in the next step.

    +

    Step 6: Configure Service in a .tfvars File

    +

    A .tfvars file can be used to configure the service using the input variables.

    +

    For example:

    +
    api_key             = "... key data ..."
    +service_type        = "transactional"
    +topology            = "es-single"
    +cloud_provider      = "gcp"
    +region              = "us-central1"
    +availability_zone   = null
    +architecture        = "amd64"
    +size                = "sky-2x8"
    +storage             = 100
    +nodes               = 1
    +sw_version          = null
    +name                = "skysql-nr-quickstart"
    +ssl_enabled         = true
    +deletion_protection = true
    +ip_address          = "192.0.2.10/32"
    +ip_address_comment  = "Describe the IP address"
    +
    +

    The input variables should be customized for your own needs. In particular, set api_key to the API key from Step 1 and set ip_address to your own public IP address in CIDR format; one way to look up your current IP address is shown below.

    + +

    The following steps assume that the file is called skysql-nr-quickstart.tfvars.

    +
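    If you are unsure of your current public IP address, one way to look it up (assuming outbound internet access; any similar service works) is:

    curl -sS https://ifconfig.me

    Append /32 to the returned address to form the CIDR value for ip_address (for example, 192.0.2.10/32).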

    Step 7: Run terraform init

    +

    Initialize the Terraform project directory and download the Terraform provider from the Terraform Registry by executing the terraform init command:

    +
    terraform init
    +
    +

    If you need to download the provider manually, see "Manually Install Provider from Binary Distribution".

    +
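    After initialization, you can optionally check the configuration for formatting and syntax problems before planning. This is only a sanity check, not a required step:

    terraform fmt
    terraform validate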

    Step 8: Run terraform plan

    +

    Create a Terraform execution plan by executing the terraform plan command and specifying the path to the .tfvars file:

    +
    terraform plan -var-file="skysql-nr-quickstart.tfvars"
    +
    +
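    Optionally, the plan can be saved to a file and then applied exactly as reviewed; the file name skysql.tfplan below is arbitrary. If you use this approach, the apply command in the next step is replaced by the second command shown here:

    terraform plan -var-file="skysql-nr-quickstart.tfvars" -out=skysql.tfplan
    terraform apply skysql.tfplan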

    Step 9: Run terraform apply

    +

    Apply the execution plan and create the SkySQL service by executing the terraform apply command, specifying the path to the .tfvars file:

    +
    terraform apply -var-file="skysql-nr-quickstart.tfvars"
    +
    +

    Terraform prints the plan from the previous step again and prompts the user to confirm that the plan should be applied:

    +
    Do you want to perform these actions?
    +  Terraform will perform the actions described above.
    +  Only 'yes' will be accepted to approve.
    +
    +  Enter a value: yes
    +
    +

    Then Terraform creates the objects and prints status messages:

    +
    skysql_service.default: Creating...
    +skysql_service.default: Still creating... [10s elapsed]
    +skysql_service.default: Still creating... [20s elapsed]
    +skysql_service.default: Still creating... [30s elapsed]
    +skysql_service.default: Still creating... [40s elapsed]
    +skysql_service.default: Still creating... [50s elapsed]
    +skysql_service.default: Still creating... [1m0s elapsed]
    +skysql_service.default: Still creating... [1m10s elapsed]
    +skysql_service.default: Still creating... [1m20s elapsed]
    +skysql_service.default: Still creating... [1m30s elapsed]
    +skysql_service.default: Still creating... [1m40s elapsed]
    +skysql_service.default: Still creating... [1m50s elapsed]
    +skysql_service.default: Still creating... [2m0s elapsed]
    +skysql_service.default: Still creating... [2m10s elapsed]
    +skysql_service.default: Still creating... [2m20s elapsed]
    +skysql_service.default: Still creating... [2m30s elapsed]
    +skysql_service.default: Still creating... [2m40s elapsed]
    +skysql_service.default: Still creating... [2m50s elapsed]
    +skysql_service.default: Still creating... [3m0s elapsed]
    +skysql_service.default: Still creating... [3m10s elapsed]
    +skysql_service.default: Still creating... [3m20s elapsed]
    +skysql_service.default: Still creating... [3m30s elapsed]
    +skysql_service.default: Creation complete after 3m40s [id=dbpgf00000001]
    +data.skysql_credentials.default: Reading...
    +data.skysql_service.default: Reading...
    +data.skysql_service.default: Read complete after 0s [name=skysql-nr-quickstart]
    +data.skysql_credentials.default: Read complete after 0s
    +
    +Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
    +
    +

    Then Terraform prints the outputs.

    +
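    Because the skysql_credentials output is marked sensitive, Terraform redacts it in the apply summary. As an alternative to the jq commands in the next step, you should be able to inspect it directly by querying the output in JSON form:

    terraform output -json skysql_credentials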

    Step 10: Obtain Connection Credentials

    +

    Obtain the connection credentials for the new SkySQL service by executing the following commands:

    +
      +
    1. Obtain the connection command from the terraform.tfstate file:
    2. +
    +
     jq ".outputs.skysql_cmd" terraform.tfstate
    +
    +
    mariadb --host dbpgf00000001.sysp0000.db.skysql.net --port 3306 \
    +   --user dbpgf00000001 -p --ssl-verify-server-cert
    +
    +
      +
    2. Obtain the user password from the terraform.tfstate file:
    2. +
    +
    jq ".outputs.skysql_credentials.value.password" terraform.tfstate
    +
    +
    "..password string.."
    +
    +
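    The surrounding quotes are part of the JSON encoding. If you prefer the raw password string (for example, to paste at a prompt or use in a script), jq's -r flag strips them:

    jq -r ".outputs.skysql_credentials.value.password" terraform.tfstate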

    Step 11: Connect

    +

    Connect to the SkySQL service by executing the connection command from the previous step:

    +
    mariadb --host dbpgf00000001.sysp0000.db.skysql.net --port 3306 \
    +   --user dbpgf00000001 -p --ssl-verify-server-cert
    +
    +

    When prompted, type the password and press enter to connect:

    +
    Enter password:
    +Welcome to the MariaDB monitor.  Commands end with ; or \g.
    +Your MariaDB connection id is 11691
    +Server version: 10.11.6-MariaDB-log MariaDB Server
    +
    +Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
    +
    +Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
    +
    +MariaDB [(none)]>
    +
    +
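    As a quick sanity check, you can also run a single statement non-interactively with the client's --execute option, reusing the same placeholder host and user shown above:

    mariadb --host dbpgf00000001.sysp0000.db.skysql.net --port 3306 \
       --user dbpgf00000001 -p --ssl-verify-server-cert \
       --execute "SELECT VERSION();"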

    Step 12: Run terraform destroy

    +

    Delete the service by executing the terraform destroy command and specifying the path to the .tfvars file:

    +
    terraform destroy -var-file="skysql-nr-quickstart.tfvars"
    +
    +

    Terraform prints the plan to delete the service and prompts the user to confirm that the plan should be applied:

    +
    Do you really want to destroy all resources?
    +Terraform will destroy all your managed infrastructure, as shown above.
    +There is no undo. Only 'yes' will be accepted to confirm.
    +
    +Enter a value: yes
    +
    +

    If deletion protection is enabled for the resources, the operation raises an error:

    +
    ā”‚ Error: Can not delete service
    +ā”‚
    +ā”‚ Deletion protection is enabled
    +ā•µ
    +
    +
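    If you do intend to delete the service, one way to proceed (a sketch; adjust the file name if yours differs) is to disable deletion protection in the .tfvars file and re-apply before destroying:

    # In skysql-nr-quickstart.tfvars, change the setting to: deletion_protection = false
    terraform apply -var-file="skysql-nr-quickstart.tfvars"
    terraform destroy -var-file="skysql-nr-quickstart.tfvars"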

    If deletion protection is not enabled for the resources, Terraform deletes the resources and prints status messages:

    +
    skysql_service.default: Destroying... [id=dbpgf00000001]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 10s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 20s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 30s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 40s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 50s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 1m0s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 1m10s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 1m20s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 1m30s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 1m40s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 1m50s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 2m0s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 2m10s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 2m20s elapsed]
    +skysql_service.default: Still destroying... [id=dbpgf00000001, 2m30s elapsed]
    +skysql_service.default: Destruction complete after 2m38s
    +
    +Destroy complete! Resources: 1 destroyed.
    +
    +

    Manually Install Provider from Binary Distribution

    +

    The SkySQL New Release Terraform provider can be downloaded from the GitHub releases page as a binary distribution and manually installed.

    +

    Manually Install Provider on Linux

    +

    With Linux, manually install the provider on the target system by performing the following steps in the same Bash terminal:

    +
      +
    1. +

      Set some environment variables to configure your provider version, OS, and architecture:

      +
      export TF_PROVIDER_RELEASE=3.0.0
      +export TF_PROVIDER_OS=linux
      +export TF_PROVIDER_ARCH=amd64
      +
      +

      For TF_PROVIDER_ARCH, the following architectures are supported on Linux:

      +
        +
      • 386
      • +
      • amd64
      • +
      • arm
      • +
      • arm64
      • +
      Download the provider from GitHub using wget:
      +
      wget -q https://github.com/skysqlinc/terraform-provider-skysql/releases/download/v3.0.0/terraform-provider-skysql_3.0.0_linux_amd64.zip
      +
      +
    2. +
    3. +

      Create a Terraform plugin directory:

      +
      mkdir -p ~/.terraform.d/plugins/registry.terraform.io/skysqlinc/skysql
      +
      +
    4. +
    5. +

      Move the provider's binary distribution to the Terraform plugin directory:

      +
      mv terraform-provider-skysql_${TF_PROVIDER_RELEASE}_${TF_PROVIDER_OS}_${TF_PROVIDER_ARCH}.zip ~/.terraform.d/plugins/registry.terraform.io/skysqlinc/skysql/
      +
      +
    6. +
    7. +

      Verify that the provider's binary distribution is present in the Terraform plugin directory:

      +
      ls -l ~/.terraform.d/plugins/registry.terraform.io/skysqlinc/skysql/
      +
      +
    8. +
    +

    Manually Install Provider on macOS

    +

    With macOS, manually install the provider on the target system by performing the following steps in the same macOS Terminal:

    +
      +
    1. +

      If Homebrew is not installed, install it:

      +
      /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
      +
      +
    2. +
    3. +

      Install wget using Homebrew:

      +
      brew install wget
      +
      +
    4. +
    5. +

      Set some environment variables to configure your provider version, OS, and architecture:

      +
      export TF_PROVIDER_RELEASE=3.0.0
      +export TF_PROVIDER_OS=darwin
      +export TF_PROVIDER_ARCH=arm64
      +
      +

      For TF_PROVIDER_ARCH, the following architectures are supported on macOS: amd64 and arm64.

      +
    6. +
    7. +

      Download the provider from GitHub using wget:

      +
      wget -q https://github.com/skysqlinc/terraform-provider-skysql/releases/download/v3.0.0/terraform-provider-skysql_3.0.0_darwin_arm64.zip
      +
      +
    8. +
    9. +

      Create a Terraform plugin directory:

      +
      mkdir -p ~/.terraform.d/plugins/registry.terraform.io/skysqlinc/skysql
      +
      +
    10. +
    11. +

      Move the provider's binary distribution to the Terraform plugin directory:

      +
      mv terraform-provider-skysql_${TF_PROVIDER_RELEASE}_${TF_PROVIDER_OS}_${TF_PROVIDER_ARCH}.zip ~/.terraform.d/plugins/registry.terraform.io/skysqlinc/skysql/
      +
      +
    12. +
    13. +

      Verify that the provider's binary distribution is present in the Terraform plugin directory:

      +
      ls -l ~/.terraform.d/plugins/registry.terraform.io/skysqlinc/skysql/
      +
      +
    14. +
    +
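    On either platform, once the provider archive is in place, re-run terraform init from the project directory; Terraform should then discover the locally installed provider instead of downloading it from the registry:

    cd ~/skysql-nr-tf
    terraform init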

    Resources

\ No newline at end of file
diff --git a/Quickstart/autonomous.png b/Quickstart/autonomous.png
new file mode 100644
index 00000000..c8b1fbb7
Binary files /dev/null and b/Quickstart/autonomous.png differ
diff --git a/Quickstart/index.html b/Quickstart/index.html
new file mode 100644
index 00000000..f0b9c75d
--- /dev/null
+++ b/Quickstart/index.html
@@ -0,0 +1,2656 @@

    Quickstart

    +

    It only takes a few minutes to launch a standalone or clustered database on SkySQL. You can pick from about 30 global regions and launch on AWS or GCP.

    +

    You have three choices to provision a DB on SkySQL:

    +

    This Quickstart explains how to launch database services and manage the lifecycle of database services using the Portal in a web browser.

    +

    The three options are:

    +
      +
    • Use the SkySQL web Portal. Make your choices with a few clicks and hit Launch.
    • +
    • Use the DBaaS API with a REST client
    • +
    • Use the Terraform provider
    • +
    +

    Step 1: Register for SkySQL

    +

    Go to app.skysql.com to sign up. You can sign up using your Google, GitHub, or LinkedIn credentials, or just use your email address.

    +

    skysql-id

    +

    Step 2: Launch a Service

    +
      +
    1. +

      Log in to the SkySQL Portal and, from the Dashboard, click the + Launch New Service button.

      +
    2. +
    3. +

      From the launch interface, select the choices detailed below.

      +

      Select:

      +
        +
      • Transactions and then Enterprise Server with Replicas
      • +
      • AWS and Ohio, USA (us-east-2), or Google Cloud and Iowa, USA (us-central1), or a region of your choice
      • +
      • Since this Quickstart is a simple test, select:
          +
        • The smallest instance size, Sky-2x8 (ARM64) on AWS
        • +
        • 100 GB of gp3 storage with the default 3000 IOPS and 125 MB/s throughput
        • +
        +
      • +
      • Name the service "quickstart-1"
      • +
      • Select Add my current IP: 99.43.xxx.xxx in the Security section
      • +
      • Then, click the Launch Service button.
      • +
      +

      launch-service

      +

      For additional information on available selections, see "Service Launch".

      +
    4. +
    5. +

      You will be returned to the Dashboard where your service will be in a Creating state.

      +
    6. +
    +

    When the service reaches the "Healthy" state, go to the next step. It typically takes about 5 minutes or less to launch a new DB.

    +

    Step 3: Observe, Scale

    +

    Monitoring

    +

    You can monitor all the important database and OS metrics from the dashboard. The monitoring UI also allows you to view and download any or all logs (error, info, or audit logs).

    +

    Basic status is shown on the Dashboard.

    +

    To see expanded status and metrics information:

    +
      +
    1. From the Dashboard, click on the service name. (This is "quickstart-1" if you used the suggested name.)
    2. +
    3. From the Monitoring Dashboard, you can choose to view service (Service Overview) or server details from the navigation tabs.
    4. +
    5. +

      Specific views are provided for different sets of metrics. These views can be accessed using the buttons in the upper-right corner. From the service overview, views include Status, Lags, Queries, Database and System.

      +

      monitoring

      +

      Monitoring Dashboard

      +
    6. +
    +

    Scaling

    +

    SkySQL features automatic rule-based scaling (Autonomous) and manual on-demand scaling.

    +

    With automatic scaling, node count (horizontal) and node size (vertical) changes can be triggered based on load. Additionally, storage capacity expansion can be triggered based on usage. These Autonomous features are opt-in. For additional information, see "Autonomous".

    +

    autonomous

    +

    Autonomous

    +

    With manual scaling, you can perform horizontal scaling (In/Out), vertical scaling (Up/Down), and storage expansion on-demand using Self-Service Operations. For additional information, see "Self-Service Operations".

    +

    scaling

    +

    Self-Service Scaling of Nodes

    +

    Step 4: Tear-down

    +

    When you are done with your testing session, you can stop the service. When a service is stopped, storage charges continue to accrue, but compute charges pause until the service is started again.

    +

    When you are done with testing, you can delete the service.

    +

    Stopping, starting, and deleting a service are examples of Self-Service Operations that you can perform through the Portal.

    +

    For additional information, see "Self-Service Operations".

    +

    Launch DB using the REST API

    +

    Launch DB using the Terraform Provider

\ No newline at end of file
diff --git a/Quickstart/launch-service.png b/Quickstart/launch-service.png
new file mode 100644
index 00000000..b2521ff6
Binary files /dev/null and b/Quickstart/launch-service.png differ
diff --git a/Quickstart/monitoring.png b/Quickstart/monitoring.png
new file mode 100644
index 00000000..60fe5eb7
Binary files /dev/null and b/Quickstart/monitoring.png differ
diff --git a/Quickstart/scaling.png b/Quickstart/scaling.png
new file mode 100644
index 00000000..45bd18c2
Binary files /dev/null and b/Quickstart/scaling.png differ
diff --git a/Quickstart/skysql-id.png b/Quickstart/skysql-id.png
new file mode 100644
index 00000000..adf8a040
Binary files /dev/null and b/Quickstart/skysql-id.png differ
diff --git a/Reference Guide/Instance Size Choices/index.html b/Reference Guide/Instance Size Choices/index.html
new file mode 100644
index 00000000..6ad887fa
--- /dev/null
+++ b/Reference Guide/Instance Size Choices/index.html
@@ -0,0 +1,2962 @@

    Instance Size Choices

    +

    Instance size choices are specific to the cloud provider, topology, region, and hardware architecture.

    + + +

    MariaDB Server

    +

    For Foundation tier:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Instance Size | Cloud Provider | CPU | Memory
    sky-2x4 | aws | 2 vCPU | 4 GB
    sky-2x8 | aws, gcp, azure | 2 vCPU | 8 GB
    sky-4x16 | aws, gcp, azure | 4 vCPU | 16 GB
    sky-4x32 | aws, gcp, azure | 4 vCPU | 32 GB
    sky-8x32 | aws, gcp, azure | 8 vCPU | 32 GB
    sky-8x64 | aws, gcp, azure | 8 vCPU | 64 GB
    sky-16x64 | aws, gcp, azure | 16 vCPU | 64 GB
    sky-16x128 | aws, gcp, azure | 16 vCPU | 128 GB
    +

    For Power tier:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Instance Size | Cloud Provider | CPU | Memory
    sky-2x4 | aws | 2 vCPU | 4 GB
    sky-2x8 | aws, gcp, azure | 2 vCPU | 8 GB
    sky-4x16 | aws, gcp, azure | 4 vCPU | 16 GB
    sky-4x32 | aws, gcp, azure | 4 vCPU | 32 GB
    sky-8x32 | aws, gcp, azure | 8 vCPU | 32 GB
    sky-8x64 | aws, gcp, azure | 8 vCPU | 64 GB
    sky-16x64 | aws, gcp, azure | 16 vCPU | 64 GB
    sky-16x128 | aws, gcp, azure | 16 vCPU | 128 GB
    sky-32x128 | aws, gcp, azure | 32 vCPU | 128 GB
    sky-32x256 | aws, gcp, azure | 32 vCPU | 256 GB
    sky-64x256 | aws, gcp, azure | 64 vCPU | 256 GB
    sky-64x512 | aws, gcp, azure | 64 vCPU | 512 GB
    +

    MaxScale

    +

    With Power tier, the following instance sizes can be selected for MaxScale nodes:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Instance Size | Cloud Provider | CPU | Memory
    sky-2x4 | aws | 2 vCPU | 4 GB
    sky-2x8 | aws, gcp, azure | 2 vCPU | 8 GB
    sky-4x16 | aws, gcp, azure | 4 vCPU | 16 GB
    sky-8x32 | aws, gcp, azure | 8 vCPU | 32 GB
    sky-16x64 | aws, gcp, azure | 16 vCPU | 64 GB
    sky-32x128 | aws, gcp, azure | 32 vCPU | 128 GB
    sky-64x256 | aws, gcp, azure | 64 vCPU | 256 GB
    +

    REST Client

    +

    A REST client can use the SkySQL DBaaS API to query instance size selections and choose an instance size for a new service.

    +

    Query Database Node Options with REST Client

    +

    A REST client can query the SkySQL DBaaS API for the database node instance size selections for a specific cloud provider, architecture, and topology.

    +

    To see the available database node instance sizes for a topology, use curl to call the /provisioning/v1/sizes API endpoint with type=server set:

    +
    curl -sS --location \
    +   --header "X-API-Key: ${API_KEY}" \
    +   'https://api.skysql.com/provisioning/v1/sizes?architecture=amd64&service_type=transactional&provider=gcp&topology=es-replica&type=server' \
    +   | jq .
    +
    +
    [
    +  {
    +    "id": "37629543-65d2-11ed-8da6-2228d0ae81af",
    +    "name": "sky-2x8",
    +    "display_name": "Sky-2x8",
    +    "service_type": "transactional",
    +    "provider": "gcp",
    +    "tier": "foundation",
    +    "architecture": "amd64",
    +    "cpu": "2 vCPU",
    +    "ram": "8 GB",
    +    "type": "server",
    +    "default_maxscale_size_name": "sky-2x8",
    +    "updated_on": "2022-11-16T17:15:06Z",
    +    "created_on": "2022-11-16T17:15:06Z",
    +    "is_active": true,
    +    "topology": "es-replica"
    +  },
    +  {
    +    "id": "37629489-65d2-11ed-8da6-2228d0ae81af",
    +    "name": "sky-4x16",
    +    "display_name": "Sky-4x16",
    +    "service_type": "transactional",
    +    "provider": "gcp",
    +    "tier": "foundation",
    +    "architecture": "amd64",
    +    "cpu": "4 vCPU",
    +    "ram": "16 GB",
    +    "type": "server",
    +    "default_maxscale_size_name": "sky-2x8",
    +    "updated_on": "2022-11-16T17:15:06Z",
    +    "created_on": "2022-11-16T17:15:06Z",
    +    "is_active": true,
    +    "topology": "es-replica"
    +  },
    +....
    +
    +]
    +
    +

    Query MaxScale Node Options with REST Client

    +

    A REST client can query the SkySQL DBaaS API for the MaxScale node instance size selections for a specific cloud provider, architecture, and topology.

    +

    To see the default MaxScale instance size for a topology, cloud, and architecture, use curl to call the /provisioning/v1/sizes API endpoint:

    +
    curl -sS --location \
    +   --header "X-API-Key: ${API_KEY}" \
    +   'https://api.skysql.com/provisioning/v1/sizes?provider=gcp&architecture=amd64&topology=es-replica' \
    +   | jq .
    +
    +
    [
    +   {
    +     "id": "c0666ab8-4a3b-11ed-8853-b278760e6ab5",
    +     "name": "sky-2x8",
    +     "display_name": "Sky-2x8",
    +     "service_type": "transactional",
    +     "provider": "gcp",
    +     "tier": "foundation",
    +     "architecture": "amd64",
    +     "cpu": "2 vCPU",
    +     "ram": "8 GB",
    +     "type": "server",
    +     "default_maxscale_size_name": "sky-2x8",
    +     "updated_on": "2022-10-12T14:40:00Z",
    +     "created_on": "2022-10-12T14:40:00Z",
    +     "is_active": true,
    +     "topology": "es-replica"
    +   }
    +]
    +
    +

    The default_maxscale_size_name attribute shows the default MaxScale instance size.

    +

    To see the available MaxScale node instance sizes for a topology, use curl to call the /provisioning/v1/sizes API endpoint with type=proxy set:

    +
    curl -sS --location \
    +   --header "X-API-Key: ${API_KEY}" \
    +   'https://api.skysql.com/provisioning/v1/sizes?architecture=amd64&service_type=transactional&provider=gcp&topology=es-replica&type=proxy' \
    +   | jq .
    +
    +

    The output can show different instance sizes, depending on whether your SkySQL account is Foundation tier or Power tier.

\ No newline at end of file
diff --git a/Reference Guide/MaxScale Reference/index.html b/Reference Guide/MaxScale Reference/index.html
new file mode 100644
index 00000000..b60ecea7
--- /dev/null
+++ b/Reference Guide/MaxScale Reference/index.html
@@ -0,0 +1,4115 @@

    MaxScale Reference

    +

    MariaDB MaxScale 22.08 Authenticators

    +

    The following Authenticators are supported by MariaDB MaxScale 22.08:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Method | Authenticator | Enterprise Server Plugin | Xpand Plugin | Description
    GSSAPI | GSSAPIAuth | gssapi | (none) | Authenticates client connections using a GSSAPI authentication service
    Native | MariaDBAuth | mysql_native_password | mysql_native_password | Authenticates client connections using the native password authentication method
    PAM | PAMAuth | pam, auth_pam.so | (none) | Authenticates client connections using a Pluggable Authentication Modules (PAM) service
    +

    To see Authenticators supported in other versions, see "Authenticators by MariaDB MaxScale Version".

    +

    MariaDB MaxScale 22.08 Filters

    +

    The following Filters are supported by MariaDB MaxScale 22.08:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    FilterTypeDescription
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/binlogfilter/Special RoutingBinary Log Filter can be used with theĀ binlogrouterĀ to selectively replicate Binary Log events to Replica Servers
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/cache/PerformanceCaches the result-sets ofĀ https://mariadb.com/docs/skysql-dbaas/ref/mdb/sql-statements/SELECT/Ā statements to improve query performance
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/ccrfilter/Server SelectionConsistent Critical Read (CCR) Filter detects when a statement modifies the database, and it attaches routing hints to any subsequent statements, so they get routed to the master
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/hintfilter/Server SelectionHint Filter allows services to interpret routing hints, which can be specified in a comment when a query is executed. Note that if master_accept_reads is enabled, MaxScale will still route to both master and slave
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/insertstream/PerformanceInsert Stream Filter translates bulkĀ https://mariadb.com/docs/skysql-dbaas/ref/mdb/sql-statements/INSERT/Ā statements into CSV data that is streamed to the backend server and loaded using theĀ https://mariadb.com/docs/skysql-dbaas/ref/mdb/sql-statements/LOAD_DATA_INFILE/Ā statement
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/luafilter/ProgrammaticLua Filter processes queries with the specified Lua scripts (experimental)
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/masking/SecurityMasking Filter obfuscates the return values of specified columns
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/maxrows/PerformanceMax Rows Filter limits the number of rows thatĀ https://mariadb.com/docs/skysql-dbaas/ref/mdb/sql-statements/SELECT/Ā statements, prepared statements, and stored procedures can return
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/namedserverfilter/Server SelectionNamed Server Filter compares queries to specified Regular Expressions, and when the query matches, the filter applies the specified routing hint to the query
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/qlafilter/SecurityQuery Log All (QLA) Filter logs matching queries to a CSV file
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/regexfilter/ProgrammaticRegex Filter rewrites matching queries using Regular Expressions
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/rewritefilter/Rewrites queries based on a query template
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/tee/Special RoutingTee Filter copies client requests to other services
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/throttlefilter/PerformanceThrottle Filter limits the maximum frequency of queries per second allowed for a database session
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/topfilter/PerformanceTop Filter logs the top queries by execution time
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/filter/tpmfilter/PerformanceTransaction Performance Monitoring (TPM) Filter logs information on committed transactions for performance analysis (experimental)
    +

    To see Filters supported in other versions, see "Filters by MariaDB MaxScale Version".

    +

    MariaDB MaxScale 22.08 Global Parameters

    +

    The following Global Parameters are supported by MariaDB MaxScale 22.08:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Global ParameterDescription
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_auth/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_auth/Enables HTTP Basic Access authentication for REST API
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_enabled/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_enabled/Enables the administrative interface for REST API
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_gui/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_gui/Enable admin GUI
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_host/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_host/Network interface the REST API listens on
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_jwt_algorithm/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_jwt_algorithm/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_jwt_algorithm/JWT signature algorithm
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_jwt_key/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_jwt_key/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_jwt_key/Encryption key ID for symmetric signature algorithms. If left empty, MaxScale will generate a random key that is used to sign the JWT.
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_log_auth_failures/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_log_auth_failures/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_log_auth_failures/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_log_auth_failures/Enables logging authentication failures to the administrative interface
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_oidc_url/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_oidc_url/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_oidc_url/Extra public certificates used to validate externally signed JWTs
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_pam_readonly_service/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_pam_readonly_service/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_pam_readonly_service/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_pam_readonly_service/Enables PAM-based authentication served for REST API for read-only users
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_pam_readwrite_service/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_pam_readwrite_service/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_pam_readwrite_service/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_pam_readwrite_service/Enables PAM-based authentication service for REST API for users who can perform any REST API operation
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_port/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_port/Port on network interface the REST API listenes on
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_secure_gui/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_secure_gui/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_secure_gui/Only serve GUI over HTTPS
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_ca/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_ca/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_ca/Path to PEM file containing TLS Certificate Authority (CA) to use in HTTPS for REST API. Formerly admin_ssl_ca_cert.
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_ca_cert/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_ca_cert/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_ca_cert/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_ca_cert/Alias for 'admin_ssl_ca'
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_cert/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_cert/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_cert/Path to PEM file containing TLS certificate to use in HTTPS for REST API
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_key/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_key/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_key/Path to PEM file containing TLS key to use in HTTPS for REST API
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_version/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_version/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_ssl_version/Minimum required TLS protocol version for the REST API
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_verify_url/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_verify_url/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/admin_verify_url/URL for third-party verification of client tokens
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auth_connect_timeout/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auth_connect_timeout/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auth_connect_timeout/Amount of time to wait in seconds for authentication to the Server
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auth_read_timeout/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auth_read_timeout/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auth_read_timeout/Amount of time to wait in seconds when retrieving user authentication data from the Server
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auth_write_timeout/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auth_write_timeout/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auth_write_timeout/Amount of time to wait in seconds when retrieving user authentication data from the Server. MaxScale does not write authentication data to the Server.
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auto_tune/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/auto_tune/Specifies whether a MaxScale parameter whose value depends on a specific global server variable, should automatically be updated to match the variable's current value
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/cachedir/Path to the directory containing cached data
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_cluster/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_cluster/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_cluster/Cluster used for configuration synchronization. If left empty (i.e., value is ""), synchronization is not done.
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_db/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_db/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_db/Database where the 'maxscale_config' table is created
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_interval/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_interval/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_interval/How often to synchronize the configuration
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_password/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_password/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_password/Password for the user used for configuration synchronization
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_timeout/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_timeout/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_timeout/Timeout for the configuration synchronization operations
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_user/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_user/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/config_sync_user/User account used for configuration synchronization
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/connector_plugindir/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/connector_plugindir/Path to MariaDB Connector C plugin directory
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/datadir/Path to the data directory
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/debug/Debug options
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/dump_last_statements/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/dump_last_statements/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/dump_last_statements/Sets condition on which MariaDB MaxScale dumps the last statement sent by the client
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/execdir/Path to directory containing executable files
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/key_manager/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/key_manager/Key manager type
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/language/Path to directory containing theĀ errmsg.sysĀ file
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/libdir/Path to the directory searched for modules
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/load_persisted_configs/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/load_persisted_configs/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/load_persisted_configs/Enables loading persistent runtime configuration changes on startup. Persistent runtime changes are saved to theĀ /var/lib/maxscale/maxscale.cnf.d/Ā directory.
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/local_address/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/local_address/Sets local address or network interface to use when connecting to Servers
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_augmentation/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_augmentation/Appends logging messages with the name of the function where the message was logged (used primarily for development purposes)
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_debug/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_debug/Enables logging messages at theĀ debugĀ syslog priority
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_info/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_info/Enables logging messages at theĀ infoĀ syslog priority
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_notice/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_notice/Enables logging messages at theĀ noticeĀ syslog priority
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_throttling/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_throttling/Limit the amount of identical log messages than can be logged during a certain time period
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_warn_super_user/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_warn_super_user/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_warn_super_user/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_warn_super_user/Log a warning when a user with super privilege logs in
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_warning/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/log_warning/Enables logging messages at theĀ warningĀ syslog priority
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/logdir/Path to directory used to store log files
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/max_auth_errors_until_block/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/max_auth_errors_until_block/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/max_auth_errors_until_block/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/max_auth_errors_until_block/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/max_auth_errors_until_block/Maximum number of authentication failures allowed before temporarily blocking a host
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/max_read_amount/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/max_read_amount/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/max_read_amount/Maximum amount of data read before return to epoll_wait
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/maxlog/Logs messages to the log file
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/module_configdir/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/module_configdir/Path to directory containing module configurations
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/ms_timestamp/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/ms_timestamp/Enables millisecond precision in log timestamps
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/passive/Puts the MaxScale Instance on stand-by, Passive Instances monitor Servers and accept client connections, but take no action
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/persist_runtime_changes/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/persist_runtime_changes/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/persist_runtime_changes/Persist configurations changes done at runtime
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/persistdir/Path to directory containing persistent configurations
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/piddir/Path to the directory containing the PID file
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/query_classifier/https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/query_classifier/Sets the Query Classifier module
    query_classifier_args (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/query_classifier_args/): Specifies arguments passed to the Query Classifier
    query_classifier_cache_size (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/query_classifier_cache_size/): Maximum size for the Query Classifier Cache
    query_retries (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/query_retries/): Number of times to retry an internal query interrupted by network errors
    query_retry_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/query_retry_timeout/): Amount of time in seconds to wait on retried queries
    rebalance_period (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/rebalance_period/): How often the load of the worker threads is checked and rebalancing is performed
    rebalance_threshold (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/rebalance_threshold/): If the difference in load between the thread with the maximum load and the thread with the minimum load is larger than the value of this parameter, work is moved from the former to the latter
    rebalance_window (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/rebalance_window/): How many seconds of load are taken into account when rebalancing
    retain_last_statements (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/retain_last_statements/): Number of statements stored for each session. Used in debugging.
    session_trace (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/session_trace/): Number of log entries stored in the session trace log. Used in debugging.
    session_trace_match (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/session_trace_match/): Regular expression that is matched against the contents of the session trace log; if it matches, the contents are logged when the session stops
    sharedir (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/sharedir/)
    skip_name_resolve (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/skip_name_resolve/): Do not resolve client IP addresses to hostnames during authentication
    skip_permission_checks (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/skip_permission_checks/): Disables user permission checks for services and monitors during startup
    sql_mode (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/sql_mode/): Specifies the SQL Mode for the Query Classifier
    substitute_variables (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/substitute_variables/): Substitute environment variables in configuration files
    syslog (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/syslog/): Logs messages to the syslog
    threads (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/threads/): Specifies how many threads are used for handling the routing
    users_refresh_interval (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/users_refresh_interval/): How often the users will be refreshed
    users_refresh_time (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/users_refresh_time/): How often the users can be refreshed
    writeq_high_water (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/writeq_high_water/): Maximum size of the client-side write queue to a given Server before MaxScale blocks traffic going to the Server to allow it to catch up
    writeq_low_water (https://mariadb.com/docs/skysql-dbaas/ref/mxs/global-parameters/writeq_low_water/): Size the client-side write queue must drop to before MaxScale unblocks a throttled Server

    To see Global Parameters supported in other versions, see "Global Parameters by MariaDB MaxScale Version".
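
    SkySQL manages the MaxScale configuration for you, so these global parameters are normally adjusted through the SkySQL interfaces rather than by editing files. Purely as a point of reference, the sketch below shows how a few of the parameters listed above would appear in the [maxscale] section of a self-managed maxscale.cnf; the values shown are illustrative assumptions, not recommendations.

        [maxscale]
        threads=auto                  # size the routing worker thread pool automatically
        query_retries=1               # retry internal queries interrupted by network errors once
        query_retry_timeout=5s        # wait at most 5 seconds on a retried internal query
        users_refresh_interval=30s    # refresh database users every 30 seconds
        skip_name_resolve=true        # do not resolve client IP addresses to hostnames
        syslog=true                   # also send log messages to syslog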


    MariaDB MaxScale 22.08 Module Parameters


    The Module Parameters supported by MariaDB MaxScale 22.08 are listed below.


    To see Module Parameters supported in other versions, see "Module Parameters by MariaDB MaxScale Version".


    MariaDB Monitor (mariadbmon)


    The parameters for mariadbmon:

    assume_unique_hostnames (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/assume_unique_hostnames/): Assume that hostnames are unique
    auto_failover (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/auto_failover/): Enable automatic server failover
    auto_rejoin (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/auto_rejoin/): Enable automatic server rejoin
    backend_connect_attempts (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/backend_connect_attempts,Monitor.mariadbmon/): Number of connection attempts to make to a server
    backend_connect_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/backend_connect_timeout,Monitor.mariadbmon/): Connection timeout for monitor connections
    backend_read_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/backend_read_timeout,Monitor.mariadbmon/): Read timeout for monitor connections
    backend_write_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/backend_write_timeout,Monitor.mariadbmon/): Write timeout for monitor connections
    cooperative_monitoring_locks (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/cooperative_monitoring_locks/): Cooperative monitoring type
    cs_admin_api_key (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/cs_admin_api_key/): The API key used in communication with the ColumnStore admin daemon
    cs_admin_base_path (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/cs_admin_base_path/): The base path to be used when accessing the ColumnStore administrative daemon. If, for instance, a daemon URL is https://localhost:8640/cmapi/0.4.0/node/start then the admin_base_path is "/cmapi/0.4.0".
    cs_admin_port (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/cs_admin_port/): Port of the ColumnStore administrative daemon
    demotion_sql_file (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/demotion_sql_file/): Path to SQL file that is executed during node demotion
    disk_space_check_interval (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/disk_space_check_interval,Monitor.mariadbmon/): How often the disk space is checked
    disk_space_threshold (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/disk_space_threshold,Monitor.mariadbmon/): Disk space threshold
    enforce_read_only_slaves (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/enforce_read_only_slaves/): Enable read_only on all slave servers
    enforce_simple_topology (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/enforce_simple_topology/): Enforce a simple topology
    enforce_writable_master (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/enforce_writable_master/): Disable read_only on the current master server
    events (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/events,Monitor.mariadbmon/): Events that cause the script to be called
    failcount (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/failcount/): Number of consecutive times MaxScale can fail to reach the Primary Server before it considers it down
    failover_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/failover_timeout/): Timeout for failover
    handle_events (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/handle_events/): Manage server-side events
    journal_max_age (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/journal_max_age,Monitor.mariadbmon/): The time the on-disk cached server states are valid for
    maintenance_on_low_disk_space (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/maintenance_on_low_disk_space/): Put the server into maintenance mode when it runs out of disk space
    master_conditions (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/master_conditions/): Conditions that the master servers must meet
    master_failure_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/master_failure_timeout/): Master failure timeout
    monitor_interval (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/monitor_interval,Monitor.mariadbmon/): How often the servers are monitored
    password (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/password,Monitor.mariadbmon/): Password for the user used to monitor the servers
    promotion_sql_file (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/promotion_sql_file/): Path to SQL file that is executed during node promotion
    rebuild_port (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/rebuild_port/): Listen port used for transferring server backup
    replication_master_ssl (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/replication_master_ssl/): Enable SSL when configuring replication
    replication_password (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/replication_password/): Password for the user that is used for replication
    replication_user (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/replication_user/): User used for replication
    script (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/script,Monitor.mariadbmon/): Script to run whenever an event occurs
    script_max_replication_lag (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/script_max_replication_lag/): Replication lag limit at which the script is run
    script_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/script_timeout,Monitor.mariadbmon/): Timeout for the script
    servers (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/servers,Monitor.mariadbmon/): List of servers to use
    servers_no_promotion (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/servers_no_promotion/): List of servers that are never promoted
    slave_conditions (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/slave_conditions/): Conditions that the slave servers must meet
    ssh_check_host_key (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssh_check_host_key/): Whether SSH host key checking is enabled
    ssh_keyfile (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssh_keyfile/): SSH keyfile. Used for running remote commands on servers.
    ssh_port (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssh_port/): SSH port. Used for running remote commands on servers.
    ssh_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssh_timeout/): SSH connection and command timeout
    ssh_user (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssh_user/): SSH username. Used for running remote commands on servers.
    switchover_on_low_disk_space (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/switchover_on_low_disk_space/): Perform a switchover when a server runs out of disk space
    switchover_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/switchover_timeout/): Timeout for switchover
    user (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/user,Monitor.mariadbmon/): Username used to monitor the servers
    verify_master_failure (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/verify_master_failure/): Verify master failure
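
    For reference, a minimal mariadbmon monitor section in a self-managed maxscale.cnf could combine several of these parameters as shown below. The section name, server object names, user, and password are placeholder assumptions; SkySQL configures the monitor on your behalf, so treat this only as a sketch of how the parameters fit together.

        [MariaDB-Monitor]
        type=monitor
        module=mariadbmon
        servers=server1,server2,server3      # placeholder server object names
        user=monitor_user                    # placeholder monitoring account
        password=monitor_password
        monitor_interval=2s                  # poll the servers every 2 seconds
        auto_failover=true                   # promote a replica if the primary fails
        auto_rejoin=true                     # rejoin recovered servers automatically
        enforce_read_only_slaves=true        # keep read_only enabled on replicas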

    Read/Write Split Router (readwritesplit)


    The parameters for readwritesplit:

    auth_all_servers (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/auth_all_servers,Router.readwritesplit/): Retrieve users from all backend servers instead of only one
    causal_reads (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/causal_reads/): Configures read causality; reads subsequent to writes are issued in a manner that reduces the effect of replication lag
    causal_reads_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/causal_reads_timeout/): Timeout for synchronization of the Primary Server with a Replica Server during causal reads
    connection_keepalive (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/connection_keepalive,Router.readwritesplit/): How often idle connections are pinged
    connection_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/connection_timeout,Router.readwritesplit/): Connection idle timeout
    delayed_retry (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/delayed_retry/): Retry queries that fail to route due to connection issues
    delayed_retry_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/delayed_retry_timeout/): Timeout for retrying queries that fail to route due to connection issues
    disable_sescmd_history (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/disable_sescmd_history,Router.readwritesplit/): Disable session command history
    enable_root_user (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/enable_root_user,Router.readwritesplit/): Allow the root user to connect to this service
    force_connection_keepalive (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/force_connection_keepalive,Router.readwritesplit/): Ping connections unconditionally
    idle_session_pool_time (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/idle_session_pool_time,Router.readwritesplit/): Put connections into pool after session has been idle for this long
    lazy_connect (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/lazy_connect/): Create connections only when needed
    localhost_match_wildcard_host (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/localhost_match_wildcard_host,Router.readwritesplit/): Match localhost to wildcard host
    log_auth_warnings (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_auth_warnings,Router.readwritesplit/): Log a warning when client authentication fails
    log_debug (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_debug,Router.readwritesplit/): Log debug messages for this service (debug builds only)
    log_info (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_info,Router.readwritesplit/): Log info messages for this service
    log_notice (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_notice,Router.readwritesplit/): Log notice messages for this service
    log_warning (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_warning,Router.readwritesplit/): Log warning messages for this service
    master_accept_reads (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/master_accept_reads,Router.readwritesplit/): Use master for reads
    master_failure_mode (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/master_failure_mode/): Master failure mode behavior
    master_reconnection (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/master_reconnection/): Reconnect to the Primary Server if it changes mid-session
    max_connections (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_connections,Router.readwritesplit/): Maximum number of connections
    max_sescmd_history (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_sescmd_history,Router.readwritesplit/): Session command history size
    max_slave_connections (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_slave_connections/): Maximum number of connections the router session can use to connect to Replica Servers
    max_slave_replication_lag (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_slave_replication_lag/): Number of seconds a Replica Server is allowed to fall behind the Primary Server
    multiplex_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/multiplex_timeout,Router.readwritesplit/): How long a session can wait for a connection to become available
    net_write_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/net_write_timeout,Router.readwritesplit/): Network write timeout
    optimistic_trx (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/optimistic_trx/): Optimistically offload transactions to slaves
    password (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/password,Router.readwritesplit/): Password for the user used to retrieve database users
    prune_sescmd_history (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/prune_sescmd_history,Router.readwritesplit/): Prune old session command history if the limit is exceeded
    rank (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/rank,Router.readwritesplit/): Service rank
    retain_last_statements (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/retain_last_statements,Router.readwritesplit/): Number of statements kept in memory
    retry_failed_reads (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/retry_failed_reads/): Automatically retry failed reads outside of transactions
    reuse_prepared_statements (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/reuse_prepared_statements/): Reuse identical prepared statements inside the same connection
    session_trace (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/session_trace,Router.readwritesplit/): Enable session tracing for this service
    session_track_trx_state (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/session_track_trx_state,Router.readwritesplit/): Track session state using server responses
    slave_connections (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/slave_connections/): Starting number of slave connections
    slave_selection_criteria (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/slave_selection_criteria/): Criteria the router uses to select Replica Servers when load balancing read operations
    strict_multi_stmt (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/strict_multi_stmt/): Routes multi-statement queries to the Primary Server
    strict_sp_calls (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/strict_sp_calls/): Routes CALL statements (https://mariadb.com/docs/skysql-dbaas/ref/mdb/sql-statements/CALL/) to the Primary Server
    strip_db_esc (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/strip_db_esc,Router.readwritesplit/): Strip escape characters from database names
    transaction_replay (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/transaction_replay/): Replays in-progress transactions that fail on a different Server
    transaction_replay_attempts (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/transaction_replay_attempts/): Maximum number of times to attempt to replay failed transactions
    transaction_replay_checksum (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/transaction_replay_checksum/): Type of checksum to calculate for results
    transaction_replay_max_size (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/transaction_replay_max_size/): Maximum size in bytes permitted for transaction replays
    transaction_replay_retry_on_deadlock (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/transaction_replay_retry_on_deadlock/): Maximum number of times the router attempts to replay transactions in the event that the transaction fails due to deadlocks
    transaction_replay_retry_on_mismatch (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/transaction_replay_retry_on_mismatch/): Retry transaction on checksum mismatch
    transaction_replay_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/transaction_replay_timeout/): Timeout for transaction replay
    use_sql_variables_in (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/use_sql_variables_in/): Where the router sends session variable queries
    user (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/user,Router.readwritesplit/): Username used to retrieve database users
    user_accounts_file (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/user_accounts_file,Router.readwritesplit/): Load additional users from a file
    user_accounts_file_usage (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/user_accounts_file_usage,Router.readwritesplit/): When and how the user accounts file is used
    version_string (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/version_string,Router.readwritesplit/): Custom version string to use
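
    For reference, a minimal readwritesplit service section in a self-managed maxscale.cnf might look like the sketch below. The section name, server object names, user, password, and the specific values are placeholder assumptions used only to show how these parameters combine; in SkySQL the service is configured for you.

        [Read-Write-Service]
        type=service
        router=readwritesplit
        servers=server1,server2,server3            # placeholder server object names
        user=service_user                          # placeholder account used to fetch database users
        password=service_password
        master_reconnection=true                   # follow the primary if it changes mid-session
        transaction_replay=true                    # replay interrupted in-progress transactions
        slave_selection_criteria=ADAPTIVE_ROUTING  # pick replicas based on observed response times
        max_slave_replication_lag=30s              # skip replicas lagging more than 30 seconds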

    MariaDB Protocol


    The parameters for MariaDBProtocol:

    address (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/address,Protocol.MariaDBProtocol/): Listener address
    authenticator (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/authenticator,Protocol.MariaDBProtocol/): Listener authenticator
    authenticator_options (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/authenticator_options,Protocol.MariaDBProtocol/): Authenticator options
    connection_init_sql_file (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/connection_init_sql_file,Protocol.MariaDBProtocol/): Path to connection initialization SQL
    port (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/port,Protocol.MariaDBProtocol/): Listener port
    protocol (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/protocol,Protocol.MariaDBProtocol/): Listener protocol to use
    service (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/service,Protocol.MariaDBProtocol/): Service to which the listener connects
    socket (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/socket,Protocol.MariaDBProtocol/): Listener UNIX socket
    sql_mode (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/sql_mode,Protocol.MariaDBProtocol/): SQL parsing mode
    ssl (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl,Protocol.MariaDBProtocol/): Enable TLS for server
    ssl_ca (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_ca,Protocol.MariaDBProtocol/): Path to the X.509 certificate authority chain file in PEM format. In MaxScale 6 and earlier, this parameter was named ssl_ca_cert (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_ca_cert,Protocol.MariaDBProtocol/). In MaxScale 22.08, ssl_ca_cert was renamed to ssl_ca. For backward compatibility, ssl_ca_cert can be used as an alias, but MariaDB recommends using ssl_ca because ssl_ca_cert has been deprecated.
    ssl_ca_cert (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_ca_cert,Protocol.MariaDBProtocol/): Alias for ssl_ca (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_ca,Protocol.MariaDBProtocol/)
    ssl_cert (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_cert,Protocol.MariaDBProtocol/): TLS public certificate
    ssl_cert_verify_depth (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_cert_verify_depth,Protocol.MariaDBProtocol/): TLS certificate verification depth
    ssl_cipher (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_cipher,Protocol.MariaDBProtocol/): TLS cipher list
    ssl_crl (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_crl,Protocol.MariaDBProtocol/): TLS certificate revocation list
    ssl_key (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_key,Protocol.MariaDBProtocol/): TLS private key
    ssl_verify_peer_certificate (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_verify_peer_certificate,Protocol.MariaDBProtocol/): Verify TLS peer certificate
    ssl_verify_peer_host (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_verify_peer_host,Protocol.MariaDBProtocol/): Verify TLS peer host
    ssl_version (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_version,Protocol.MariaDBProtocol/): Minimum TLS protocol version
    user_mapping_file (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/user_mapping_file/): Path to user and group mapping file
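
    For reference, a listener section using MariaDBProtocol in a self-managed maxscale.cnf could combine these parameters as sketched below. The section name, the referenced service name, and the certificate paths are placeholder assumptions; SkySQL provisions listeners and TLS material for you.

        [Read-Write-Listener]
        type=listener
        service=Read-Write-Service                     # placeholder service name
        protocol=MariaDBProtocol
        port=3306
        ssl=true                                       # require TLS from clients
        ssl_cert=/etc/ssl/maxscale/server-cert.pem     # placeholder certificate paths
        ssl_key=/etc/ssl/maxscale/server-key.pem
        ssl_ca=/etc/ssl/maxscale/ca.pem
        ssl_version=TLSv12                             # minimum accepted TLS protocol version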

    Read Connection Router (readconnroute)


    The parameters for readconnroute:

    auth_all_servers (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/auth_all_servers,Router.readconnroute/): Retrieve users from all backend servers instead of only one
    connection_keepalive (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/connection_keepalive,Router.readconnroute/): How often idle connections are pinged
    connection_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/connection_timeout,Router.readconnroute/): Connection idle timeout
    disable_sescmd_history (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/disable_sescmd_history,Router.readconnroute/): Disable session command history
    enable_root_user (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/enable_root_user,Router.readconnroute/): Allow the root user to connect to this service
    force_connection_keepalive (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/force_connection_keepalive,Router.readconnroute/): Ping connections unconditionally
    idle_session_pool_time (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/idle_session_pool_time,Router.readconnroute/): Put connections into pool after session has been idle for this long
    localhost_match_wildcard_host (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/localhost_match_wildcard_host,Router.readconnroute/): Match localhost to wildcard host
    log_auth_warnings (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_auth_warnings,Router.readconnroute/): Log a warning when client authentication fails
    log_debug (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_debug,Router.readconnroute/): Log debug messages for this service (debug builds only)
    log_info (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_info,Router.readconnroute/): Log info messages for this service
    log_notice (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_notice,Router.readconnroute/): Log notice messages for this service
    log_warning (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_warning,Router.readconnroute/): Log warning messages for this service
    master_accept_reads (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/master_accept_reads,Router.readconnroute/): Whether read operations are routed to the Primary Server or whether it only accepts write operations
    max_connections (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_connections,Router.readconnroute/): Maximum number of connections
    max_replication_lag (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_replication_lag,Router.readconnroute/): Maximum acceptable replication lag
    max_sescmd_history (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_sescmd_history,Router.readconnroute/): Session command history size
    multiplex_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/multiplex_timeout,Router.readconnroute/): How long a session can wait for a connection to become available
    net_write_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/net_write_timeout,Router.readconnroute/): Network write timeout
    password (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/password,Router.readconnroute/): Password for the user used to retrieve database users
    prune_sescmd_history (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/prune_sescmd_history,Router.readconnroute/): Prune old session command history if the limit is exceeded
    rank (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/rank,Router.readconnroute/): Service rank
    retain_last_statements (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/retain_last_statements,Router.readconnroute/): Number of statements kept in memory
    router_options (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/router_options,Router.readconnroute/): A comma-separated list of server roles
    session_trace (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/session_trace,Router.readconnroute/): Enable session tracing for this service
    session_track_trx_state (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/session_track_trx_state,Router.readconnroute/): Track session state using server responses
    strip_db_esc (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/strip_db_esc,Router.readconnroute/): Strip escape characters from database names
    user (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/user,Router.readconnroute/): Username used to retrieve database users
    user_accounts_file (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/user_accounts_file,Router.readconnroute/): Load additional users from a file
    user_accounts_file_usage (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/user_accounts_file_usage,Router.readconnroute/): When and how the user accounts file is used
    version_string (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/version_string,Router.readconnroute/): Custom version string to use
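
    For reference, a readconnroute service section in a self-managed maxscale.cnf could use these parameters as sketched below to expose a read-only endpoint that routes only to replicas. The section name, server object names, user, and password are placeholder assumptions; SkySQL creates the read-only service for you.

        [Read-Only-Service]
        type=service
        router=readconnroute
        servers=server1,server2,server3      # placeholder server object names
        user=service_user                    # placeholder account used to fetch database users
        password=service_password
        router_options=slave                 # only route connections to replica servers
        max_replication_lag=30s              # skip replicas lagging more than 30 seconds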

    MaxScale


    The parameters for maxscale:

    admin_auth (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_auth/): Admin interface authentication
    admin_enabled (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_enabled/): Admin interface is enabled
    admin_gui (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_gui/): Enable admin GUI
    admin_host (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_host/): Admin interface host
    admin_jwt_algorithm (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_jwt_algorithm/): JWT signature algorithm
    admin_jwt_key (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_jwt_key/): Encryption key ID for symmetric signature algorithms. If left empty, MaxScale will generate a random key that is used to sign the JWT.
    admin_log_auth_failures (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_log_auth_failures/): Log admin interface authentication failures
    admin_oidc_url (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_oidc_url/): Extra public certificates used to validate externally signed JWTs
    admin_pam_readonly_service (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_pam_readonly_service/): PAM service for read-only users
    admin_pam_readwrite_service (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_pam_readwrite_service/): PAM service for read-write users
    admin_port (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_port,maxscale.maxscale/): Admin interface port
    admin_secure_gui (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_secure_gui/): Only serve GUI over HTTPS
    admin_ssl_ca (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_ssl_ca/): Admin SSL CA cert
    admin_ssl_ca_cert (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_ssl_ca_cert/): Alias for 'admin_ssl_ca'
    admin_ssl_cert (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_ssl_cert/): Admin SSL cert
    admin_ssl_key (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_ssl_key/): Admin SSL key
    admin_ssl_version (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_ssl_version/): Minimum required TLS protocol version for the REST API
    admin_verify_url (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/admin_verify_url/): URL for third-party verification of client tokens
    auth_connect_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/auth_connect_timeout/): Connection timeout for fetching user accounts
    auth_read_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/auth_read_timeout/): Read timeout for fetching user accounts (deprecated)
    auth_write_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/auth_write_timeout/): Write timeout for fetching user accounts (deprecated)
    auto_tune (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/auto_tune/): Specifies whether a MaxScale parameter whose value depends on a specific global server variable should automatically be updated to match the variable's current value
    config_sync_cluster (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/config_sync_cluster/): Cluster used for configuration synchronization. If left empty (i.e., value is ""), synchronization is not done.
    config_sync_db (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/config_sync_db/): Database where the 'maxscale_config' table is created
    config_sync_interval (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/config_sync_interval/): How often to synchronize the configuration
    config_sync_password (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/config_sync_password/): Password for the user used for configuration synchronization
    config_sync_timeout (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/config_sync_timeout/): Timeout for the configuration synchronization operations
    config_sync_user (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/config_sync_user/): User account used for configuration synchronization
    debug (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/debug,maxscale.maxscale/): Debug options
    dump_last_statements (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/dump_last_statements/): In what circumstances the last statements that a client sent should be dumped
    key_manager (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/key_manager/): Key manager type
    load_persisted_configs (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/load_persisted_configs/): Specifies whether persisted configuration files should be loaded on startup
    local_address (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/local_address,maxscale.maxscale/): Local address to use when connecting
    log_debug (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_debug,maxscale.maxscale/): Specifies whether debug messages should be logged (meaningful only with debug builds)
    log_info (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_info,maxscale.maxscale/): Specifies whether info messages should be logged
    log_notice (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_notice,maxscale.maxscale/): Specifies whether notice messages should be logged
    log_throttling (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_throttling/): Limit the amount of identical log messages that can be logged during a certain time period
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_warn_super_user/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_warn_super_user/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_warn_super_user/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_warn_super_user/Log a warning when a user with super privilege logs in
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_warning,maxscale.maxscale/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/log_warning,maxscale.maxscale/Specifies whether warning messages should be logged
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_auth_errors_until_block/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_auth_errors_until_block/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_auth_errors_until_block/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_auth_errors_until_block/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_auth_errors_until_block/The maximum number of authentication failures that are tolerated before a host is temporarily blocked
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_read_amount/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_read_amount/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_read_amount/Maximum amount of data read before return to epoll_wait
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/maxlog/Log to MaxScale's own log
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ms_timestamp/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ms_timestamp/Enable or disable high precision timestamps
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/passive/True if MaxScale is in passive mode
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/persist_runtime_changes/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/persist_runtime_changes/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/persist_runtime_changes/Persist configurations changes done at runtime
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_classifier/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_classifier/The name of the query classifier to load
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_classifier_args/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_classifier_args/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_classifier_args/Arguments for the query classifier
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_classifier_cache_size/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_classifier_cache_size/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_classifier_cache_size/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_classifier_cache_size/Type: size, default value is 15% of available memory
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_retries/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_retries/Number of times an interrupted query is retried
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_retry_timeout/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_retry_timeout/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/query_retry_timeout/The total timeout in seconds for any retried queries
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/rebalance_period/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/rebalance_period/How often should the load of the worker threads be checked and rebalancing be made
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/rebalance_threshold/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/rebalance_threshold/If the difference in load between the thread with the maximum load and the thread with the minimum load is larger than the value of this parameter, then work will be moved from the former to the latter
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/rebalance_window/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/rebalance_window/The load of how many seconds should be taken into account when rebalancing
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/retain_last_statements,maxscale.maxscale/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/retain_last_statements,maxscale.maxscale/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/retain_last_statements,maxscale.maxscale/How many statements should be retained for each session for debugging purposes
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/session_trace,maxscale.maxscale/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/session_trace,maxscale.maxscale/How many log entries are stored in the session specific trace log
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/session_trace_match/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/session_trace_match/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/session_trace_match/Regular expression that is matched against the contents of the session trace log and if it matches the contents are logged when the session stops
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/skip_name_resolve/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/skip_name_resolve/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/skip_name_resolve/Do not resolve client IP addresses to hostnames during authentication
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/skip_permission_checks/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/skip_permission_checks/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/skip_permission_checks/Skip service and monitor permission checks
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/sql_mode,maxscale.maxscale/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/sql_mode,maxscale.maxscale/The query classifier sql mode
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/syslog/Log to syslog
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/threads/Type: count, default value is based on cpu count
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/users_refresh_interval/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/users_refresh_interval/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/users_refresh_interval/How often the users will be refreshed
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/users_refresh_time/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/users_refresh_time/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/users_refresh_time/How often the users can be refreshed
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/writeq_high_water/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/writeq_high_water/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/writeq_high_water/High water mark of dcb write queue
    https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/writeq_low_water/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/writeq_low_water/https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/writeq_low_water/Low water mark of dcb write queue
    +

    Server Objects

    +

    The parameters for server objects:

Parameter | Description
address (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/address,servers.servers/) | Server address
authenticator (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/authenticator,servers.servers/) | Server authenticator (deprecated)
disk_space_threshold (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/disk_space_threshold,servers.servers/) | Server disk space threshold
extra_port (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/extra_port/) | Server extra port
max_routing_connections (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/max_routing_connections/) | Maximum routing connections
monitorpw (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/monitorpw/) | Monitor password
monitoruser (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/monitoruser/) | Monitor user
persistmaxtime (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/persistmaxtime/) | Maximum time that a connection can be in the pool
persistpoolmax (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/persistpoolmax/) | Maximum size of the persistent connection pool
port (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/port,servers.servers/) | Server port
priority (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/priority/) | Server priority
protocol (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/protocol,servers.servers/) | Server protocol (deprecated)
proxy_protocol (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/proxy_protocol/) | Enable proxy protocol
rank (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/rank,servers.servers/) | Server rank
socket (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/socket,servers.servers/) | Server UNIX socket
ssl (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl,servers.servers/) | Enable TLS for server
ssl_ca (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_ca,servers.servers/) | Path to the X.509 certificate authority chain file in PEM format. In MaxScale 6 and earlier, this parameter was named ssl_ca_cert. In MaxScale 22.08, ssl_ca_cert was renamed to ssl_ca. For backward compatibility, ssl_ca_cert can be used as an alias, but MariaDB recommends using ssl_ca because ssl_ca_cert has been deprecated.
ssl_ca_cert (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_ca_cert,servers.servers/) | Alias for ssl_ca
ssl_cert (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_cert,servers.servers/) | TLS public certificate
ssl_cert_verify_depth (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_cert_verify_depth,servers.servers/) | TLS certificate verification depth
ssl_cipher (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_cipher,servers.servers/) | TLS cipher list
ssl_key (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_key,servers.servers/) | TLS private key
ssl_verify_peer_certificate (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_verify_peer_certificate,servers.servers/) | Verify TLS peer certificate
ssl_verify_peer_host (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_verify_peer_host,servers.servers/) | Verify TLS peer host
ssl_version (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/ssl_version,servers.servers/) | Minimum TLS protocol version
type (https://mariadb.com/docs/skysql-dbaas/ref/mxs/module-parameters/type,servers.servers/) | Object type
    +

    MariaDB MaxScale 22.08 Monitors

    +

    The following Monitors are supported by MariaDB MaxScale 22.08:

Monitor | Description
auroramon (https://mariadb.com/docs/skysql-dbaas/ref/mxs/monitor/auroramon/) | Tracks Servers in an Amazon Aurora deployment
csmon (https://mariadb.com/docs/skysql-dbaas/ref/mxs/monitor/csmon/) | Tracks Servers in a MariaDB ColumnStore deployment
galeramon (https://mariadb.com/docs/skysql-dbaas/ref/mxs/monitor/galeramon/) | Tracks Servers in a MariaDB Enterprise Cluster deployment
grmon (https://mariadb.com/docs/skysql-dbaas/ref/mxs/monitor/grmon/) | Tracks Servers in a MySQL Group Replication deployment
mariadbmon (https://mariadb.com/docs/skysql-dbaas/ref/mxs/monitor/mariadbmon/) | Tracks Servers in a MariaDB Replication deployment
xpandmon (https://mariadb.com/docs/skysql-dbaas/ref/mxs/monitor/xpandmon/) | A Xpand cluster monitor
    +

To see Monitors supported in other versions, see "Monitors by MariaDB MaxScale Version".

    +

    MariaDB MaxScale 22.08 MaxScale Protocols

    +

    MariaDB MaxScale uses protocols to specify how it communicates with a given client, server, or back-end.

    +

    The following MaxScale Protocols are supported by MariaDB MaxScale 22.08:

Protocol | Description
CDC | Used with connections to a CDC service
MariaDBProtocol | The client to MaxScale MySQL protocol implementation
nosqlprotocol | MaxScale NoSQL client protocol implementation
    +

To see MaxScale Protocols supported in other versions, see "MaxScale Protocols by MariaDB MaxScale Version".

    +

    MariaDB MaxScale 22.08 Routers

    +

    The following Routers are supported by MariaDB MaxScale 22.08:

Router | Type | Description
avrorouter (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/avrorouter/) | Replication | Avro Router renders Binary Log events to JSON or Avro files, which are passed through the CDC Protocol to other services, like Kafka or Hadoop
binlogrouter (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/binlogrouter/) | Replication | Binary Log Router serves binlog events to Replica Servers
cat (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/cat/) | Query | Cat Router sends queries to all Servers
hintrouter (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/hintrouter/) | Query | Hint Router uses routing hints to specify where to send queries
kafkacdc (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/kafkacdc/) | | Replicate data changes from MariaDB to Kafka
kafkaimporter (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/kafkaimporter/) | | Stream Kafka messages into MariaDB
mirror (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/mirror/) | | Mirrors SQL statements to multiple targets
readconnroute (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/readconnroute/) | Query | Read Connection Router balances the query load across the available Servers
readwritesplit (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/readwritesplit/) | Query | Read/Write Splitter sends write operations to the Primary Server and balances the query load of read operations between the Replica Servers
replicator (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/replicator/) | |
schemarouter (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/schemarouter/) | Query | Schema Router provides simple sharding of data across multiple Servers
smartrouter (https://mariadb.com/docs/skysql-dbaas/ref/mxs/routing/smartrouter/) | | Provides routing for the Smart Query feature
    +

To see Routers supported in other versions, see "Routers by MariaDB MaxScale Version".

diff --git a/Reference Guide/Monitoring Metrics Reference/index.html b/Reference Guide/Monitoring Metrics Reference/index.html new file mode 100644 index 00000000..4c499741

    Monitoring Metrics Reference

Panel Name | UI Tab | Scope | Panel Type | Description
Connections | status | service | table | This panel shows the number of used and aborted connections for each ES node along with the max_connections value
CPU Load | status | service | graph | This panel shows the CPU usage for each ES node during the selected time interval
Current SQL Commands | status | service | pie-chart | This panel shows the ratio between the types of SQL statements executed by the service during the selected time interval
Disk Size of Data | status | service | table | This panel shows the amount of storage space used (as the usage percentage
QPS | status | service | graph | This panel shows the queries per second (QPS) executed by the ES node during the selected time interval
Replicas lags | status | service | table | This panel shows average values for certain replication-related metadata to help determine if the replica ES nodes are currently lagging behind the primary ES node
Replicas status | status | service | table | This panel shows summarized values for certain replication-related metadata to help determine if any replica ES nodes encountered replication issues during the selected time interval
GTID Replication Position | lags | service | graph | This panel shows the Global Transaction ID (GTID) for each ES node during the selected time interval
Seconds Behind Primary | lags | service | graph | This panel shows the lag in seconds behind the primary in terms of the time of the transaction being replicated.
Exec Primary Log Position | lags | service | graph | This panel shows the position up to which the primary has processed in the current primary binary log file.
Read Primary Log Position | lags | service | graph | This panel shows the position up to which the I/O thread has read in the current primary binary log file.
MariaDB Client Thread Activity | queries | service | graph | This panel shows the number of client threads running on all ES nodes during the selected time interval
MariaDB QPS | queries | service | graph | This panel shows the number of queries per second (QPS) executed by all ES nodes during the selected time interval
MariaDB Slow Queries | queries | service | graph | This panel shows the number of slow queries executed by all ES nodes during the selected time interval
Top Command Counters | queries | service | graph | This panel shows the top 30 statement types that were most frequently executed by all ES nodes during the selected time interval
Top Command Counters Hourly | queries | service | graph | This panel shows the top 30 statement types that were most frequently executed by all ES nodes in 1 hour intervals over the past 24 hours
MariaDB Aborted Connections | database | service | graph | This panel shows the number of connections aborted by the ES node during the selected time interval
MariaDB Open Tables | database | service | graph | This panel shows the number of tables opened by the database servers on all ES nodes during the selected time interval
MariaDB Service Connections | database | service | graph | This panel shows the number of clients connected to the ES node during the selected time interval
MariaDB Table Locks | database | service | graph | This panel shows the number of table locks requested by all ES nodes during the selected time interval
MariaDB Table Opened | database | service | graph | This panel shows the number of tables that have been opened by all ES nodes during the selected time interval
MaxScale Server Connections | database | service | graph | This panel shows the number of client connections open between the MaxScale node and each ES node during the selected time interval
MaxScale Service Connections | database | service | graph | This panel shows the number of clients connected to all MaxScale nodes during the selected time interval
CPU Load | system | service | graph | This panel shows the CPU usage for each ES node during the selected time interval
Disk Size of Data | system | service | graph | This panel shows the amount of storage space used by all ES nodes during the selected time interval
I/O Activity - Page In | system | service | graph | This panel shows the total number of bytes read from the ES node's file system during the selected time interval
I/O Activity - Page Out | system | service | graph | This panel shows the total number of bytes written to the ES node's file system during the selected time interval
IOPS - Page In | system | service | graph | This panel shows the total number of reads performed from the ES node's file system during the selected time interval
IOPS - Page Out | system | service | graph | This panel shows the total number of writes performed from the ES node's file system during the selected time interval
MariaDB Network Traffic | system | service | graph | This panel shows the amount of data sent and received over the network by the database servers on all ES nodes during the selected time interval
MariaDB Network Usage Hourly | system | service | graph | This panel shows the amount of data sent and received over the network per hour by the database servers on all ES nodes over the past 24 hours
Memory Usage | system | service | graph | This panel shows memory usage details for all ES nodes during the selected time interval
Network Errors | system | service | graph | This panel shows the number of network errors encountered by all ES nodes during the selected time interval
Network Packets Dropped | system | service | graph | This panel shows the number of network packets dropped by all ES nodes during the selected time interval
Network Traffic - Inbound | system | service | graph | This panel shows the amount of data received over the network by the operating systems on all ES nodes during the selected time interval
Network Traffic - Outbound | system | service | graph | This panel shows the amount of data sent over the network by the operating systems on all ES nodes during the selected time interval
Aborted Connections | status | server | singlestat | This panel shows the number of aborted connections for the ES node during the selected time interval
Buffer Pool Size of Total RAM | status | server | pie-chart | This panel shows the current size of the InnoDB buffer pool for the ES node in two units: the absolute size and the percentage of the server's usable memory
Connections | status | server | stat | This panel shows the number of clients connected to the MaxScale node
CPU Load | status | server | gauge | This panel shows the current CPU usage for the ES node
CPU Usage / Load | status | server | graph | This panel shows the CPU usage for the ES node during the selected time interval
QPS | status | server | singlestat | This panel shows the number of queries per second (QPS) processed by the ES node during the selected time interval
Current SQL Commands | status | server | pie-chart | This panel shows the ratio between the types of SQL statements executed by the ES node during the selected time interval
I/O Activity | status | server | graph | This panel shows the total number of bytes written to or read from the ES node's file system during the selected time interval
InnoDB Data/sec | status | server | graph | This panel shows the number of bytes per second read and written by InnoDB during the selected time interval
Max Connections | status | server | graph | This panel shows the highest number of clients that were concurrently connected to the MaxScale node during the selected time interval
Max Connections | status | server | stat | This panel shows the highest number of clients that have ever been concurrently connected to the MaxScale node
Max Time in Queue | status | server | graph | This panel shows the longest time the MaxScale node waited for an I/O event during the selected time interval
Network Traffic | status | server | graph | This panel shows the amount of data sent and received over the network by the operating system on the ES node during the selected time interval
Memory Usage | status | server | gauge | This panel shows the current memory usage details for the ES node
Resident (RSS) memory | status | server | gauge | This panel shows the current resident set size (RSS) of the MaxScale process
RO Service Connections | status | server | singlestat | This panel shows the number of clients currently connected to the MaxScale node's read-only listener
Rows / sec | status | server | graph | This panel shows the total number of rows written and read per second by the ES node during the selected time interval
RW Service Connections | status | server | singlestat | This panel shows the number of clients currently connected to the MaxScale node's read-write listener
RW / sec | status | server | graph | This panel shows the number of read and write operations per second that were handled by the threads on the MaxScale node during the selected time interval
Database Server Connections | status | server | graph | This panel shows the number of client connections open between the MaxScale node and each ES node during the selected time interval
MaxScale Connections | status | server | graph | This panel shows the number of clients connected to the MaxScale node's listeners during the selected time interval
Stack size | status | server | gauge | This panel shows the current stack size of the MaxScale node
Threads | status | server | singlestat | This panel shows the number of threads currently used by the MaxScale node
RW/sec | status | server | graph | This panel shows the total number of queries that were actively being executed by the MaxScale node during the selected time interval
MaxScale Connections | status | server | graph | This panel shows the total number of connections created by the MaxScale node since the MaxScale node was started
Used Connections | status | server | pie-chart | This panel shows the current number of client connections as a percentage of the ES node's max_connections value
MariaDB Aborted Connections | database | server | graph | This panel shows the number of connections aborted by the ES node during the selected time interval
MariaDB Client Thread Activity | database | server | graph | This panel shows the number of client threads connected and running on the ES node during the selected time interval
MariaDB Connections | database | server | graph | This panel shows the number of client connections to the ES node during the selected time interval
MariaDB Open Files | database | server | graph | This panel shows the number of files opened by the database server on the ES node during the selected time interval
MariaDB Open Tables | database | server | graph | This panel shows the number of tables opened by the database server on the ES node during the selected time interval
MariaDB Opened Files / sec | database | server | graph | This panel shows the number of files opened per second by the database server on the ES node during the selected time interval
MariaDB Table Locks | database | server | graph | This panel shows the number of table locks requested by the ES node during the selected time interval
Temporary Objects Created | database | server | graph | This panel shows the number of temporary tables created by the ES node during the selected time interval
MariaDB Table Definition Cache | caches | server | graph | This panel shows how many table definitions were cached by the ES node during the selected time interval
MariaDB Table Open Cache Status | caches | server | graph | This panel shows the activity of the table open cache on the ES node during the selected time interval
MariaDB Thread Cache | caches | server | graph | This panel shows the number of threads created and cached for re-use on the ES node during the selected time interval
MariaDB Handlers / sec | queries | server | graph | This panel shows how many internal query handlers per second have been created by the ES node during the selected time interval
MariaDB QPS and Questions | queries | server | graph | This panel shows the number of queries and questions per second executed by the ES node during the selected time interval
MariaDB Slow Queries | queries | server | graph | This panel shows the number of slow queries executed by the ES node during the selected time interval
MariaDB Sorts | queries | server | graph | This panel shows the number of times the ES node has used certain algorithms to sort data during the selected time interval
MariaDB Handlers / sec | queries | server | graph | This panel shows the number of transaction-related handlers created by the ES node during the selected time interval
Top Command Counters | queries | server | graph | This panel shows the top 30 statement types that were most frequently executed by the ES node during the selected time interval
Top Command Counters Hourly | queries | server | graph | This panel shows the top 30 statement types that were most frequently executed by the ES node in 1 hour intervals over the past 24 hours
CPU Usage / Load | system | server | graph | This panel shows the CPU usage for the ES node during the selected time interval
Disk Size of Data | system | server | graph | This panel shows the amount of storage space used by the ES node during the selected time interval
Disk Size of Logs | system | server | graph | This panel shows the amount of storage space used by the server error logs during the selected time interval
I/O Activity | system | server | graph | This panel shows the total number of bytes written to or read from the ES node's file system during the selected time interval
InnoDB Data / sec | system | server | graph | This panel shows the number of bytes per second read and written by InnoDB during the selected time interval
IOPS | system | server | graph | This panel shows the number of input/output operations per second performed by the ES node during the selected time interval
MariaDB Memory Overview | system | server | graph | This panel shows how much memory the ES node used for the InnoDB buffer pool
MariaDB Network Traffic | system | server | graph | This panel shows the amount of data sent and received over the network by the database server on the ES node during the selected time interval
MariaDB Network Usage Hourly | system | server | graph | This panel shows the amount of data sent and received over the network per hour by the database server on the ES node over the past 24 hours
Memory Distribution | system | server | graph | This panel shows memory usage details for the ES node during the selected time interval
Network Errors | system | server | graph | This panel shows the number of network errors encountered by the ES node during the selected time interval
Network Packets Dropped | system | server | graph | This panel shows the number of network packets dropped by the ES node during the selected time interval
Network Traffic | system | server | graph | This panel shows the amount of data sent and received over the network by the operating system on the ES node during the selected time interval
Errors | performance | server | graph | This panel shows the number of errors encountered by threads on the MaxScale node during the selected time interval
MaxScale Descriptors | performance | server | graph | This panel shows the number of descriptors used by the MaxScale node during the selected time interval
MaxScale Hangups | performance | server | graph | This panel shows the number of client connections closed by the MaxScale node during the selected time interval
Memory Usage | performance | server | graph | This panel shows memory usage details for the MaxScale node during the selected time interval
MaxScale Modules | modules | server | table | This panel lists the modules installed on the MaxScale node
diff --git a/Reference Guide/REST API Reference/index.html b/Reference Guide/REST API Reference/index.html new file mode 100644 index 00000000..05a38e67

    REST API Reference

    +

The easiest way to get started with the API is to use the Swagger docs. Just generate your API key, click Authorize, enter <your API key>, and try out any of the APIs.

    +

You can use the REST API to provision services, get pricing and billing information, fetch or change service or MariaDB configuration, manage users and their roles, schedule backups or restores, and scale, stop, and delete services.

    +

    Please refer to the API docs for examples and a complete list of all the APIs.

    +

    The Backup Service API is available here

diff --git a/Reference Guide/Region Choices/index.html b/Reference Guide/Region Choices/index.html new file mode 100644 index 00000000..b99423c9

    Region Choices

    +

    AWS Regions

Region | Location
ap-northeast-1 | Tokyo, Japan
ap-northeast-2 | Seoul, South Korea
ap-southeast-1 | Jurong West, Singapore
ap-southeast-2 | Sydney, Australia
ca-central-1 | Montréal, Québec, Canada
eu-central-1 | Frankfurt, Germany
eu-north-1 | Stockholm, Sweden
eu-west-1 | Dublin, Ireland
eu-west-2 | London, England, UK
eu-west-3 | Paris, France
sa-east-1 | São Paulo, Brazil
us-east-1 | Northern Virginia, USA
us-east-2 | Ohio, USA
us-west-2 | Oregon, USA
    +

    GCP Regions

Region | Location
asia-northeast1 | Tokyo, Japan
asia-south1 | Mumbai, India
asia-southeast1 | Jurong West, Singapore
asia-southeast2 | Jakarta, Indonesia
australia-southeast1 | Sydney, Australia
europe-north1 | Hamina, Finland
europe-west1 | St. Ghislain, Belgium
europe-west2 | London, England, UK
europe-west3 | Frankfurt, Germany
europe-west4 | Eemshaven, Netherlands
europe-west9 | Paris, France
northamerica-northeast1 | Montréal, Québec, Canada
us-central1 | Council Bluffs, Iowa, USA
us-east1 | Moncks Corner, South Carolina, USA
us-east4 | Ashburn (Loudoun County), Virginia, USA
us-west1 | The Dalles, Oregon, USA
us-west2 | Los Angeles, California, USA
us-west4 | Las Vegas, Nevada, USA
    +

    Azure Regions

Region | Location
eastus | Richmond, Virginia, USA
northeurope | Dublin, Ireland
    +

Please contact us if any aspect of the service does not align with your intended use case.

diff --git a/Reference Guide/Sky Stored Procedures/index.html b/Reference Guide/Sky Stored Procedures/index.html new file mode 100644 index 00000000..28c8b026

    Sky Stored Procedures

    + +

    change_external_primary

    +

Executes the CHANGE MASTER TO statement to configure inbound replication from an external primary server based on a binary log file and position.

    +
    CALL sky.change_external_primary(
    +   host VARCHAR(255),
    +   port INT,
    +   logfile TEXT,
    +   logpos LONG ,
    +   use_ssl_encryption BOOLEAN
    +);
    +
    +
-- Run_this_grant_on_your_external_primary
+GRANT REPLICATION SLAVE ON *.* TO 'skysql_replication'@'%' IDENTIFIED BY '<password_hash>';
    +
    +
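For example, a call using the host and binary log coordinates that appear in the replication_status output later on this page might look like the following sketch; all values are placeholders to replace with your own external primary's details:

    -- Placeholder host, port, binlog file, position, and SSL flag
    CALL sky.change_external_primary('mariadb1.example.com', 3306, 'mariadb-bin.000001', 558, true);

Once the grant shown above has been applied on the external primary, replication can be started with sky.start_replication().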

    change_connect_retry

    +

    Sets the connection retry interval for the external replication master.

    +
    CALL change_connect_retry(connect_retry INT);
    +
    +

    If the value is NULL, a default retry interval of 60 seconds will be used.

    +
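As an illustration, the following call sets an assumed 120-second retry interval; any positive number of seconds can be used:

    -- 120 is an arbitrary example value, in seconds
    CALL change_connect_retry(120);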

    change_external_primary_gtid

    +

Executes the CHANGE MASTER TO statement to configure inbound replication from an external primary server based on the provided GTID.

    +
    CALL sky.change_external_primary_gtid(
    +   host VARCHAR(255),
    +   port INT,
    +   gtid VARCHAR(60),
    +   use_ssl_encryption BOOLEAN
    +);
    +
    +
    -- Run_this_grant_on_your_external_primary
    +GRANT REPLICATION SLAVE ON *.* TO 'skysql_replication'@'%' IDENTIFIED BY '<password_hash>';
    +
    +
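A hypothetical call, reusing the GTID position shown in the gtid_status example below as a placeholder, might look like:

    -- Placeholder host, port, GTID position, and SSL flag
    CALL sky.change_external_primary_gtid('mariadb1.example.com', 3306, '0-100-1', true);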

    change_heartbeat_period

    +

    Sets the heartbeat period for the external replication master.

    +
    CALL change_heartbeat_period(heartbeat_period DECIMAL(10,3));
    +
    +

    If the value is NULL, a default heartbeat period of 5 seconds will be used.

    +
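For example, to use an assumed 10-second heartbeat instead of the default:

    -- 10.000 is an arbitrary example value, in seconds
    CALL change_heartbeat_period(10.000);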

    change_replica_delay

    +

    Sets the replication delay for the external replication master.

    +
    CALL change_replica_delay(replica_delay INT);
    +
    +

    If the value is NULL, a default delay of 1 second will be used.

    +
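For example, to delay the replica by an assumed 600 seconds:

    -- 600 is an arbitrary example value, in seconds
    CALL change_replica_delay(600);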

    change_use_ssl_encryption

    +

    Toggles the SSL encryption setting for the external replication master.

    +
    CALL change_use_ssl_encryption(use_ssl_encryption BOOLEAN);
    +
    +

    If the value is NULL, SSL encryption will be enabled by default.

    +
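For example, to explicitly disable SSL for the inbound replication connection (only appropriate when the channel is otherwise secured):

    -- Pass true to enable SSL, false to disable it
    CALL change_use_ssl_encryption(false);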

    gtid_status

    +

Provides a list of GTID-related system variables.

    +
    CALL sky.gtid_status();
    +
    +
    +-------------------+---------------------------+
    +| Variable_name     | Value                     |
    ++-------------------+---------------------------+
    +| gtid_binlog_pos   | 435700-435700-122         |
    +| gtid_binlog_state | 435700-435700-122         |
    +| gtid_current_pos  | 0-100-1,435700-435700-122 |
    +| gtid_slave_pos    | 0-100-1                   |
    ++-------------------+---------------------------+
    +
    +

    kill_session

    +

Kills any non-root or non-SkySQL threads, similar to the KILL statement.

    +
    CALL sky.kill_session(IN thread BIGINT);
    +
    +
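The thread ID can be obtained from SHOW PROCESSLIST; a hypothetical call might look like the following, where 12345 is a placeholder ID:

    -- Find the target thread ID, then kill that session
    SHOW PROCESSLIST;
    CALL sky.kill_session(12345);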

    replication_grants

    +

Provides a GRANT statement to run on an external primary server when configuring inbound replication.

    +
    CALL sky.replication_grants();
    +
    +
    -- Run_this_grant_on_your_external_primary
    +GRANT REPLICATION SLAVE ON *.* TO 'skysql_replication'@'%' IDENTIFIED BY '<password_hash>';
    +
    +

    replication_status

    +

Executes the SHOW REPLICA STATUS statement to obtain the status of inbound replication.

    +
    CALL sky.replication_status()\G
    +
    +
      +
      *************************** 1. row ***************************
      +                Slave_IO_State: Waiting for master to send event
      +                   Master_Host: mariadb1.example.com
      +                   Master_User: skysql_replication
      +                   Master_Port: 3306
      +                 Connect_Retry: 60
      +               Master_Log_File: mariadb-bin.000001
      +           Read_Master_Log_Pos: 558
      +                Relay_Log_File: mariadb-relay-bin.000002
      +                 Relay_Log_Pos: 674
      +         Relay_Master_Log_File: mariadb-bin.000001
      +              Slave_IO_Running: Yes
      +             Slave_SQL_Running: Yes
      +               Replicate_Do_DB:
      +           Replicate_Ignore_DB:
      +            Replicate_Do_Table:
      +        Replicate_Ignore_Table:
      +       Replicate_Wild_Do_Table:
      +   Replicate_Wild_Ignore_Table:
      +                    Last_Errno: 0
      +                    Last_Error:
      +                  Skip_Counter: 0
      +           Exec_Master_Log_Pos: 558
      +               Relay_Log_Space: 985
      +               Until_Condition: None
      +                Until_Log_File:
      +                 Until_Log_Pos: 0
      +            Master_SSL_Allowed: No
      +            Master_SSL_CA_File:
      +            Master_SSL_CA_Path:
      +               Master_SSL_Cert:
      +             Master_SSL_Cipher:
      +                Master_SSL_Key:
      +         Seconds_Behind_Master: 0
      + Master_SSL_Verify_Server_Cert: No
      +                 Last_IO_Errno: 0
      +                 Last_IO_Error:
      +                Last_SQL_Errno: 0
      +                Last_SQL_Error:
      +   Replicate_Ignore_Server_Ids:
      +              Master_Server_Id: 100
      +                Master_SSL_Crl:
      +            Master_SSL_Crlpath:
      +                    Using_Gtid: Slave_Pos
      +                   Gtid_IO_Pos: 0-100-1
      +       Replicate_Do_Domain_Ids:
      +   Replicate_Ignore_Domain_Ids:
      +                 Parallel_Mode: conservative
      +                     SQL_Delay: 0
      +           SQL_Remaining_Delay: NULL
      +       Slave_SQL_Running_State: Slave has read all relay log; waiting for more updates
      +              Slave_DDL_Groups: 0
      +Slave_Non_Transactional_Groups: 0
      +    Slave_Transactional_Groups: 0
      +
      +
    +

    reset_replication

    +

Executes the RESET REPLICA statement to clear inbound replication configuration.

    +
    CALL sky.reset_replication();
    +
    +
    +------------------------+
    +| Message                |
    ++------------------------+
    +| Replica has been reset |
    ++------------------------+
    +
    +

    set_master_ssl

    +

Toggles the MASTER_SSL replication option using the CHANGE MASTER TO statement.

    +
    CALL sky.set_master_ssl();
    +
    +

    skip_repl_error

    +

    This stored procedure can be used to ignore a transaction that is causing a replication error.

    +

Executes the STOP REPLICA statement, then sets the sql_slave_skip_counter system variable, and then executes the START REPLICA statement to skip a single transaction. Does not currently work with GTID.

    +
    CALL sky.skip_repl_error();
    +
    +

    start_replication

    +

Executes the START REPLICA statement to start inbound replication from an external primary.

    +
    CALL sky.start_replication();
    +
    +
    +----------------------------------------+
    +| Message                                |
    ++----------------------------------------+
    +| External replication running normally. |
    ++----------------------------------------+
    +
    +

    start_replication_until

    +

Starts the external replication until a specified relay log file and position. It checks whether the replication threads are running and starts replication if they are not. It also provides feedback on the replication status.

    +
    CALL start_replication_until(relay_log_file TEXT, relay_log_pos LONG);
    +
    +
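As a sketch, reusing the relay log coordinates shown in the replication_status example above (replace them with your own target position):

    -- Placeholder relay log file and position
    CALL start_replication_until('mariadb-relay-bin.000002', 674);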

    start_replication_until_gtid

    +

    Starts the external replication until the specified GTID position. It checks if the replication threads are running and starts the replication if they are not. It also provides feedback on the replication status.

    +
    CALL start_replication_until_gtid(master_gtid_pos TEXT);
    +
    +
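For example, using the GTID position shown in the gtid_status example above as a placeholder target:

    -- Placeholder GTID position
    CALL start_replication_until_gtid('0-100-1');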

    stop_replication

    +

Executes the STOP REPLICA statement to stop inbound replication from an external primary.

    +
    CALL sky.stop_replication();
    +
    +
    +---------------------------------+
    +| Message                         |
    ++---------------------------------+
    +| Replication is down or disabled |
    ++---------------------------------+
    +
diff --git a/Reference Guide/index.html b/Reference Guide/index.html new file mode 100644 index 00000000..0cfd3dd3

Reference Guides

diff --git a/Security/Configuring Firewall/index.html b/Security/Configuring Firewall/index.html new file mode 100644 index 00000000..01475f29

    Configuring Firewall

    +

    SkySQL services are firewall-protected.

    +

    Access to SkySQL services is managed on a per-service basis.

    +

    IPv4 addresses and IPv4 netblocks can be added to the allowlist to enable service access. Access from other addresses will be blocked.

    +

    Default

    +

    By default, when a service is launched its allowlist is empty. All external traffic to the service is blocked.

    +

    Secure Access Configuration

    +

    To modify Secure Access settings:

    +
      +
1. Log in to the Portal.
2. Click the "Settings" link in the main menu (left navigation in the Portal).
3. Click the "Secure Access" button.
    +

    secure-access.png

    +

    Secure Access Settings

    +

Alternatively, you can access firewall settings for a specific service by clicking the "MANAGE" button for the desired service, then choosing "Manage allowlist" from the menu.

    +

    allow-list-dialog.png

    +

    Allowlist dialog

    +

    Add to the Allowlist

    +

IP addresses can be added to the allowlist from the Firewall settings interface or a service's Secure Access interface:

    +
      +
1. Enter an IPv4 address or IPv4 netblock.
2. Optionally enter an alias for this address. An alias provides a way to remember why an address was added to the allowlist.
3. Click the "Save" button.
    +

After saving the change, a notification will be provided when the change has been applied.

    +

    Remove from the Allowlist

    +

IP addresses can be removed from the allowlist from the Firewall settings interface or a service's Secure Access interface:

    +
      +
1. Click the "X" button to the right of the entry to remove.
2. Click the "Save" button.
    +

After saving the change, a notification will be provided when the change has been applied.

    +

    Edit an Allowlist Entry

    +

An allowlist entry can be edited from the Firewall settings interface or a service's Secure Access interface:

    +
      +
1. Modify the IP address or alias of the desired allowlist entry.
2. Click the "Save" button.
    +

After saving the change, a notification will be provided when the change has been applied.

diff --git a/Security/Encryption/index.html b/Security/Encryption/index.html new file mode 100644 index 00000000..06e86469

    Encryption

    +

    Data-in-Transit Encryption

    +

    SkySQL features data-in-transit encryption by default.

    +

    Client-to-Server

    +

By default, SkySQL services feature data-in-transit encryption for client connections. TLS 1.2 and TLS 1.3 are supported. SSL/TLS certificates and encryption settings are not customer-configurable.

    +

    For information on how to connect with TLS, see "Connect and Query".

    +

    The "Disable SSL/TLS" option may be appropriate for some customers when also using AWS PrivateLink or GCP VPC Peering.

    +

    Server-to-Server

    +

    SkySQL services perform server-to-server communication between MariaDB MaxScale, MariaDB Enterprise Server, MariaDB Xpand nodes, and SkySQL infrastructure.

    +

    By default, these server-to-server communications are protected with data-in-transit encryption:

    +

For SkySQL Services on AWS, see "Encryption in transit (AWS)". SkySQL uses configurations which feature automatic in-transit encryption.

    +

    For SkySQL Services on GCP, see "Encryption in transit (GCP)". SkySQL uses encryption by default.

    +

    Data-at-Rest Encryption

    +

    SkySQL features transparent data-at-rest encryption.

    +

    SkySQL Services on AWS use Amazon EBS encryption.

    +

SkySQL Services on GCP benefit from encryption by default.

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Security/Managing API keys/index.html b/Security/Managing API keys/index.html new file mode 100644 index 00000000..a106d8df --- /dev/null +++ b/Security/Managing API keys/index.html @@ -0,0 +1,2636 @@ + + + + + + + + + + + + + + + + + + + + + + + Managing API keys - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Managing API keys

    +

    Getting Started with API Keys

    +
      +
1. Go to the SkySQL API Key management page (https://app.skysql.com/user-profile/api-keys) and generate an API key.

2. Export the value from the token field to an environment variable, $API_KEY (a complete example session is shown after this list).

3. Use it on subsequent requests, e.g.:

    curl --request GET 'https://api.skysql.com/provisioning/v1/services' \
      --header "X-API-Key: $API_KEY"
    +
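For example, a minimal shell session might look like the following. The key value is a placeholder (store real keys in a secrets manager rather than shell history), and jq is used here only to pretty-print the JSON response:

    # Placeholder value; paste the token shown on the API Key management page
    export API_KEY='YOUR-SKYSQL-API-KEY'

    curl --silent --request GET 'https://api.skysql.com/provisioning/v1/services' \
         --header "X-API-Key: $API_KEY" | jq .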

    Managing API Keys

    +

Use the Portal to create new keys, or to revoke or permanently delete existing keys.

    +

Alternatively, you can use the Swagger API portal to manage the keys:

    +
      +
1. Fetch all keys
2. Create a new API Key
3. Delete a user-specific key
4. Update a user-specific key
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Security/Managing Portal Users/index.html b/Security/Managing Portal Users/index.html new file mode 100644 index 00000000..d59b9fec --- /dev/null +++ b/Security/Managing Portal Users/index.html @@ -0,0 +1,2710 @@ + + + + + + + + + + + + + + + + + + + + + + + Managing Portal Users - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Managing Portal Users

    +

By default, SkySQL services are launched and managed in the Portal.

    +

    For multiple SkySQL ID accounts to jointly manage a set of SkySQL services, these accounts can be added to a Team.

    +

The User Management interface in the Portal is a self-service tool to manage your Team.

    +

    A Team can be managed by the initial user on the Team or by any Administrator added to the Team.

    +

    An email address can belong to only one SkySQL Team. If an email address is already in a Team, it cannot be added to another Team.

    +

    Access to User Management

    +

    To access the User Management interface:

    +
      +
1. Log in to the Portal.
2. Click the "Settings" link in the main menu (left navigation in the Portal).
3. Click the User Management button.
    +

    user-management.png

    +

    User Management

    +

    Roles

    +

    Each Team member has one of the following roles:

    +
      +
• Administrator
• Member
• Viewer
• Billing
    +

    Add a Team Member

    +

    user-management-invite.png

    +

    User Management - Invite

    +

    From the User Management interface, an Administrator can invite someone to join a team:

    +
      +
1. Click the "Invite" button in the upper right corner of the User Management interface.
2. Enter the email address of the person to invite to the team.
3. Choose the desired role for this user.
4. Click the "Add User" button.
    +

    Once a user has been invited, they will appear in the Team member list in an "Invited" status until the invitation is accepted.

    +

    An invitation is delivered by email. The user will be prompted to complete account setup when accepting the invitation.

    +

    You can withdraw the invitation before it is accepted by clicking on the "Cancel Invitation" link in the Team member list.

    +

    Remove a Team Member

    +

    From the User Management interface, an Administrator can remove a team member:

    +
      +
1. Identify the team member to remove.
2. Click the ellipsis icon ("...") on the right side of that user's row.
3. Select the "Deactivate" menu item.
4. Read the displayed warning.
5. Click the "Deactivate" button to complete deactivation.
    +

    Edit a Role

    +

    From the User Management interface, an Administrator can change a team member's role:

    +
      +
1. Identify the team member to modify.
2. Click the ellipsis icon ("...") on the right side of that user's row.
3. Select the "Edit" menu item.
4. Choose the desired Role for the user.
5. Click the "Save" button to complete the change.
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Security/Portal Single Sign-On/index.html b/Security/Portal Single Sign-On/index.html new file mode 100644 index 00000000..001e8780 --- /dev/null +++ b/Security/Portal Single Sign-On/index.html @@ -0,0 +1,2531 @@ + + + + + + + + + + + + + + + + + + + + + + + Portal Single Sign-On - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Portal Single Sign-On

    +

    By default, authentication to the SkySQL Portal is performed with SkySQL ID credentials.

    +

Users with personal Google, GitHub, or LinkedIn accounts, or business Google G Suite accounts, can authenticate via social login. This ability does not depend on enterprise authentication configuration.

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Security/Private VPC connections/index.html b/Security/Private VPC connections/index.html new file mode 100644 index 00000000..86aeeaf5 --- /dev/null +++ b/Security/Private VPC connections/index.html @@ -0,0 +1,2532 @@ + + + + + + + + + + + + + + + + + + + + + + + Private VPC connections - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Private VPC connections

    +

    Some customers may have regulatory requirements or information security policies which prohibit the default database connections over the public internet.

    +

SkySQL services can optionally be configured for private connections using cloud provider-specific features. See "Using Private VPC Connections" for details on how to set this up.

    +

    By default, client traffic to SkySQL services may transit the public internet and is protected with TLS/SSL and a firewall configured by IP allowlist.

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Security/allow-list-dialog.png b/Security/allow-list-dialog.png new file mode 100644 index 00000000..fc381415 Binary files /dev/null and b/Security/allow-list-dialog.png differ diff --git a/Security/index.html b/Security/index.html new file mode 100644 index 00000000..cb847492 --- /dev/null +++ b/Security/index.html @@ -0,0 +1,2528 @@ + + + + + + + + + + + + + + + + + + + + + + + Security - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    + +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Security/secure-access.png b/Security/secure-access.png new file mode 100644 index 00000000..1666f3dc Binary files /dev/null and b/Security/secure-access.png differ diff --git a/Security/user-management-invite.png b/Security/user-management-invite.png new file mode 100644 index 00000000..ee7f883c Binary files /dev/null and b/Security/user-management-invite.png differ diff --git a/Security/user-management.png b/Security/user-management.png new file mode 100644 index 00000000..669758fd Binary files /dev/null and b/Security/user-management.png differ diff --git a/SkyCopilot Guide/ama1.png b/SkyCopilot Guide/ama1.png new file mode 100644 index 00000000..6ccd8575 Binary files /dev/null and b/SkyCopilot Guide/ama1.png differ diff --git a/SkyCopilot Guide/ama2.png b/SkyCopilot Guide/ama2.png new file mode 100644 index 00000000..a28f8b17 Binary files /dev/null and b/SkyCopilot Guide/ama2.png differ diff --git a/SkyCopilot Guide/ama3.png b/SkyCopilot Guide/ama3.png new file mode 100644 index 00000000..14573d59 Binary files /dev/null and b/SkyCopilot Guide/ama3.png differ diff --git a/SkyCopilot Guide/app1.png b/SkyCopilot Guide/app1.png new file mode 100644 index 00000000..a39623b8 Binary files /dev/null and b/SkyCopilot Guide/app1.png differ diff --git a/SkyCopilot Guide/dba1.png b/SkyCopilot Guide/dba1.png new file mode 100644 index 00000000..e6c29f4e Binary files /dev/null and b/SkyCopilot Guide/dba1.png differ diff --git a/SkyCopilot Guide/index.html b/SkyCopilot Guide/index.html new file mode 100644 index 00000000..39b6b976 --- /dev/null +++ b/SkyCopilot Guide/index.html @@ -0,0 +1,2773 @@ + + + + + + + + + + + + + + + + + + + + + + + SkyCopilot User Guide (Tech Preview) - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    SkyCopilot User Guide (Tech Preview)

    +

    SkyCopilot is a conversational AI tool designed to boost developer and DBA productivity. It can generate complex SQL queries spanning multiple tables, write code to integrate external tools with SkySQL, create MariaDB stored procedures, analyze and optimize slow queries, introspect the vast configuration options or current status of your SkySQL cluster, and much more. SkyCopilot can assist you throughout the entire lifecycle with SkySQL, including development, testing, migration, tuning, and debugging in production.

    +

    It relies on a built-in Vector Database to semantically search SkySQL documentation, AI agents to convert your queries to SQL, and OpenAI's large language model to generate natural language responses.

    +
    +

    Note

    +

SkyCopilot is currently in Tech Preview and is free to use. We may impose usage limits or ask you to upgrade to a paid plan as we finalize the product.

    +
    +

    +

    Quickstart

    +

SkyCopilot offers three conversation modes to choose from:

    +

    1. Ask Me Anything

    +

This mode allows you to ask any questions about SkySQL or MariaDB. For SkySQL-specific queries, ensure your question includes the word 'SkySQL'. This mode is best for conceptual questions about SkySQL or MariaDB in general.

    +

At the moment, this mode does not have access to your database.

You will notice a dropdown with Sample Questions. These are pre-set questions that you can choose from to get you started.

    +

    Examples:

    +
      +
• General MariaDB Query: "What is the default storage engine in MariaDB?"
    • +
    • SkySQL-specific Query: "Show me a SkySQL program to connect from Java" or "In SkySQL, how can I configure my DB properties?"
    • +
    +
    +

    Note

    +

    In this mode, you can generate SQL queries or even entire programs, but you have to copy/paste to your app or SQL editor environment to execute.

    +
    +

    2. DBA Assist

    +

    Use this mode to ask questions related to system information, tuning, and diagnostics of your databases. Always refer to the README in the Chat for detailed instructions before using this mode.

    +

    You will notice a dropdown with Sample Questions. These are pre-set questions that you can choose from to get you started.

    +

    Examples:

    +
      +
    • "Analyze the slowest query and provide specific optimization suggestions"
    • +
    • "How can I optimize the performance of my MariaDB instance?"
    • +
    • "What are the recommended configurations for a high-availability setup in SkySQL?"
    • +
    +
    +

    Note

    +

    This mode is designed to assist DBAs with their routine tasks and troubleshooting. You should go through the README in the Chat to configure your DB for this mode.

    +
    +

    3. Chat With Your App Data

    +

    This mode allows you to interact with your application databases securely. Generated SQL queries will be executed safely, providing real-time data insights and actions.

    +

    Examples:

    +

If you connect to the Northwind DB (i.e., connect to the Demo Server), you could try out questions like these.

    +
      +
    • "Show me the top 5 customers that placed the most orders."
    • +
• "Show me the total sales revenue per year. Take into account discounts offered."
    • +
    +
    +

    Note

    +

This mode is designed for querying and exploring your application data. You should go through the README and the 'Prompt Engg Guide' in the Chat to get the best out of this mode.

    +
    +

'Ask Me Anything' Mode

    +

    Use this mode when trying to understand MariaDB and SkySQL concepts or features, generate code, perform integration, or manage data migration. In this mode, SkyCopilot relies on the inherent knowledge in OpenAI's large language model about MariaDB and the SkySQL documentation.

    +

    It is important to note that while you can generate SQL statements, procedures or other arbitrary code snippets, you have to copy/paste into your app or SQL editor to run the SQL.

    +

A few best practices:

• For SkySQL-specific questions, ensure your query includes the word 'SkySQL'.
• To get the most accurate responses, always do two things:
  • Provide sufficient context in your prompts.
  • Have a conversation. When a response is provided, nudge with more clarification or corrections. The past discussion automatically becomes context for new questions.

    Examples:

    +
      +
1. General MariaDB Query: "What is the default storage engine in MariaDB?"
2. SkySQL-specific Query: "Show me a SkySQL program to connect from Java" or "In SkySQL, how can I configure my DB properties?"
3. CSV Loading Example: "I need to load a local CSV file into SkySQL. Below is sample content in the file. I need you to infer the schema with correct data types, show me the create table command, and then commands to load the data."

   VIN,County,City,State,Postal Code,Model Year,Make,Model,Electric Vehicle Type,Clean Alternative Fuel Vehicle (CAFV) Eligibility,Electric Range,Base MSRP,Legislative District,DOL Vehicle ID,Vehicle Location,Electric Utility,2020 Census Tract
   1N4AZ0CP5D,Kitsap,Bremerton,WA,98310,2013,NISSAN,LEAF,Battery Electric Vehicle (BEV),Clean Alternative Fuel Vehicle Eligible,75,0,23,214384901,POINT (-122.61136499999998 47.575195000000065), ...
    +

This will produce results like this:

    +

AMA example 1, AMA example 2, AMA example 3

    +

'DBA Assist' mode

    +

Use this mode to ask questions related to system information, tuning, and diagnostics of your databases. This mode uses the built-in system tables and metadata to answer questions.

    +

Questions are decomposed into one or more steps, and each step is typically converted to a SQL statement on one or more system tables (tables/views in the information_schema, mysql, and performance_schema schemas) and executed.
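For instance, a question like "Show me all currently running queries" might be answered with a statement of roughly this shape (illustrative only; the actual SQL SkyCopilot generates may differ):

    -- Active sessions, excluding idle connections
    SELECT id, user, db, time, state, LEFT(info, 120) AS query
    FROM information_schema.processlist
    WHERE command <> 'Sleep'
    ORDER BY time DESC;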

    +

    Prerequisites:

    +
      +
• The easiest way to try things out is to use our Demo DB Server. It is a single server with sample data and some slow queries logged.
• You can start with some of the sample questions available in the SkyCopilot UI.
• Alternatively, you can connect to any MariaDB running on SkySQL or elsewhere. This way, you can try with your own workloads. Note the following:
  • The DB user needs some privileges as noted below. Rest assured, SkyCopilot does not require any write privileges to your schemas.
  • We recommend starting with Development/Test DBs first.
  • Grant the DB user the permissions required for DBA Assistance:

    CREATE USER IF NOT EXISTS 'skyai'@'%' IDENTIFIED BY 'a_secure_password';
    GRANT SELECT, PROCESS, SHOW VIEW, SHOW DATABASES ON *.* TO `skyai`@`%`;
    GRANT CREATE, DROP, CREATE VIEW ON `sky_sys_catalog`.* TO `skyai`@`%`;
    +
      +
• Assuming you want to analyze slow queries, you need to turn on 'Slow query' logging. The slow_log overhead is proportional to the number of queries logged. It is recommended you start with a high long_query_time, implement a log_slow_rate_limit, and disable logging when not in use.
  • If using SkySQL, go to Config Manager to see all the current configuration templates. If you are just using the default one ("SkySQL Default - Enterprise Server..."), click the 'Create New' button, and change the following settings (a SQL sketch of the equivalent settings follows this list):
    • Change 'slow_query_log' to ON.
    • Change 'log_output' to TABLE (defaults to FILE).
    • Adjust the 'long_query_time' if required (defaults to 10 secs). If set too low you could substantially increase the load. You can check the global status variable Slow_queries to tune the long_query_time.
    +
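For reference, a minimal sketch of the equivalent settings expressed as SQL is shown below. The values are illustrative; on SkySQL these settings are normally managed through the Config Manager rather than SET GLOBAL, and a low long_query_time or high sampling rate increases logging overhead:

    SET GLOBAL slow_query_log = 1;
    SET GLOBAL log_output = 'TABLE';        -- defaults to FILE
    SET GLOBAL long_query_time = 5;         -- seconds; start high to limit overhead
    SET GLOBAL log_slow_rate_limit = 10;    -- log roughly 1 in 10 qualifying queries

    -- Use this counter to judge whether long_query_time is set too low or too high
    SHOW GLOBAL STATUS LIKE 'Slow_queries';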

It is also useful to turn ON 'performance_schema' (note that changing this option will restart your DB service and introduces between 1-10% overhead, so it should be tested and tuned for best practice).

    +

    Connect to your DB server or cluster:

    +

    Once you have followed the above steps you have to connect to a DB server to start using this mode.

    +

    DBA Assist example

    +

    Sample questions:

    +

    The following sample questions are also accessible in the dropdown menu on the left pane. This shows up once you have a successful connection to the DB.

    +
      +
• Which queries are taking the most time?
• Analyze the slowest query and provide details on where the time is spent.
• Analyze the slowest query and provide specific optimization suggestions.
• Show me all currently running queries.
• Provide a report on the health of my server.
• Which users have been the most active?
• Analyze the state of my InnoDB storage engine. Any insights?
• How many threads are active and what is the maximum allowed?
    +

    Scope of understanding:

    +

The AI assistant answers all the questions using system tables in the "information_schema" and "mysql" schemas primarily. It will also attempt to answer questions using the "sys" and "performance_schema" schemas, if "performance_schema" is turned ON.

    +

    We currently provide contextual information to the LLM from system schemas. We provide a lot more details on a few specific tables - information_schema.tables, mysql.slow_log, information_schema.global_variables and global_status, process_list, and the events_statements_history tables.
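As an illustration of how those tables are used, a question about slow queries typically resolves to a statement along these lines (assuming log_output is set to TABLE so that slow queries are written to mysql.slow_log; the exact SQL generated may differ):

    -- Five slowest logged statements, most expensive first
    SELECT start_time, query_time, rows_examined, LEFT(sql_text, 120) AS sql_text
    FROM mysql.slow_log
    ORDER BY query_time DESC
    LIMIT 5;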

    +

Often your questions will result in a sequence of SQL statements being executed. You will always see the actual SQL statements being used in the SQL tab in the response. Sometimes your question doesn't require any SQL execution (e.g. explain how MariaDB implements row level locking) and we will try to provide the best possible answer using the foundational knowledge in OpenAI's GPT models about MySQL and MariaDB.

    +
    +

    Note

    +

We use OpenAI's LLM to only generate SQL or provide informational content. We always execute these statements in the secure SkySQL environment.

    +
    +

It is important to note that the current implementation does not have any access to historical metrics (outside of what is available and logged to "slow_query" and the "performance_schema" tables) or the file system (for example, it cannot assess the percentage of disk consumed by data or indexes, or access error or general logs).

    +

'Chat with your App Data' mode

    +

    This mode allows you to interact with your application databases securely. Generated SQL queries will be executed safely, providing real-time data insights and actions.

    +

    Before you can start chatting, you must first connect and select a DB schema to operate on.

    +
      +
• Step 1: Connect to a DB service. The demo server has schemas like the Microsoft Northwind DB.
• Step 2: Select the DB schema to operate on. Currently we only support one DB schema at a time. We intend to support multiple schemas in the future.
• Step 3: Select the Tables for more selective context.
  • To get higher accuracy, when your schema has many tables (even if it is just > 10), you should select just the few tables you need.
    +

    Chat with App Data example

    +

    Getting accurate answers requires some discipline

    +

    You should always start with a specific task or goal in mind (e.g. you want to start a chat session to analyze sales orders as opposed to trying to get a view on customer shipments in an Order processing system).

    +

Real-world production databases are often complex - hundreds of tables, complex and hidden relationships, obtuse column names with no discernible semantics, many dimensions with coded values, and so on. By default, when you select your schema we gather all the metadata available in the DB - column names, data types, constraints, table/column descriptions - and index all this information (in a vector store). When you pose a question, the engine will extract just the relevant bits from this context and pass it along to the LLM. As you can appreciate, this will not capture a lot of the other knowledge required to train our AI engine to produce good results.

    +

To significantly increase the accuracy of the results, we recommend you add additional context using Table and Column descriptions. Please follow the steps outlined in the Prompt Engg Guide for examples and best practices. We intend to make the process of "training" a lot simpler using other AI smarts in the near future.
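One way to provide such descriptions is directly in the database as table and column comments, which are part of the metadata gathered when the schema is indexed. The table name and codes below are hypothetical and only illustrate the idea:

    -- Table-level context
    ALTER TABLE orders COMMENT = 'Customer orders; one row per order header';

    -- Column-level context explaining coded values
    ALTER TABLE orders
      MODIFY COLUMN status VARCHAR(20)
      COMMENT 'Order state: N = new, S = shipped, X = cancelled';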

    +
    +

    Note

    +

1) Guardrails - We have guardrails implemented to prevent any undesired SQL injection. Only 'SELECT' statements can ever be executed on your DB and, by default, no statement will fetch more than 100 rows. To prevent aggregation-class queries from over-consuming resources, any query taking more than 30 seconds will be canceled.
2) Lazy Indexing - We lazily index (i.e. add to the Vector DB) all the table metadata and descriptions when you ask your first question, after selections. The time taken to index is proportional to the number of tables selected.
3) SQL Generation - Your questions are translated into one or more SQL queries using only the table schema information. The SQL queries are always executed in the secure SkySQL environment only.

    +
    +

    Connecting to your Database

    +

SkyCopilot provides three different DB connection target types: a demo server, your SkySQL services, and any MariaDB server outside SkySQL.

    +

    Connect to our demo database

    +

This is the easiest way to explore the SkyCopilot functionalities. Just click on the "Connect" button, try a sample question, and continue the conversation. Our demo database is populated with the Microsoft Northwind dataset.

    +

    Connect to your SkySQL database

    +

SkyCopilot attempts to connect to your SkySQL service using the default credentials set up during service creation and displayed in the SkySQL portal. If that user has been removed or the password changed, you only need to provide a user with sufficient grants (as noted in the DBA Assist section); the service hostname and port are automatically obtained from the SkySQL APIs.

    +

    Connect to any database

    +

You can also use the SkyCopilot functionalities with any MariaDB server outside SkySQL. You just need to make sure that you provide a user with sufficient grants for your database (as noted in the DBA Assist section) and allow the SkyCopilot IP address displayed above the DB details form.

    +

    How does it work?

    +

    Within each service you launch in SkySQL, we create an "AI" user that is provided privileges to access the system tables and also run SELECT queries on your DBs but is not granted any privileges that can result in data changes - CREATE, DROP, UPDATE, INSERT, etc., are not granted. This prevents "SQL injection" related problems. The AI user also uses an internal schema with enough "VIEW" definitions for simpler access to the system views. The current design uses what is referred to as "Agentic RAG" -- LLM agents with "Retrieval Augmented Generation".

    +

    Here is how this works, in brief: The AI engine statically consumes our knowledge base along with the schema metadata for all the system tables and indexes all this information into a Vector DB (as embeddings). Each user request (question) is first converted into "embeddings" (think of embedding as a digital representation to fully understand your question). This is then used to retrieve appropriate context using similarity searches from the Vector Database. For instance, a question like "how is my buffer cache utilization" could result in extracting information on commands like "show engine innodb status" or "global status variables" from the Vector DB. This information is then passed to OpenAI's LLM to construct valid MariaDB SQL that can be executed by us. We often pass these results back to OpenAI LLM to synthesize more meaningful answers for the user. Now, this above description is a simplified version of what happens under the covers. In reality, we use an agent that is first handed the question. The agent uses a "reasoning" loop to often break down the question and uses underlying tools we provide to execute the desired action.

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/SkyCopilot Guide/quickstart.png b/SkyCopilot Guide/quickstart.png new file mode 100644 index 00000000..94582566 Binary files /dev/null and b/SkyCopilot Guide/quickstart.png differ diff --git a/Support/index.html b/Support/index.html new file mode 100644 index 00000000..5cea5a48 --- /dev/null +++ b/Support/index.html @@ -0,0 +1,2565 @@ + + + + + + + + + + + + + + + + + + + Support - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Support

    +

    SkySQL is operated by a team of site reliability engineers (SREs), support engineers and MariaDB-certified database administrators (SkyDBAs).

    +

    What Support is Available?

    +

    Foundation level support is included with every SkySQL Subscription. Support cases at this standard support level can only be created with the P3 or P4 response SLA.

    +

Power level support expands the Foundation level offering with Problem Resolution Support, Engineering Support, and 24x7 support for S1 issues. With provided logs and information, Support will work with the Customer through the needed steps for resolution via communication within the Customer Support Portal.

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Support Feature                  | Foundation            | Power
---------------------------------|-----------------------|------------------------------
Availability                     | Foundation Tier       | Power Tier
Named Technical Support Contacts | 3                     | 10
Problem Resolution Support       | Yes                   | Yes
Engineering Support              | Yes                   | Yes
Active Monitoring                | Yes                   | Yes
Consultative Support             | No                    | Yes
SkyDBA Add-on Available          | No                    | Yes
Real-Time Chat                   | No                    | Yes, with SkyDBA Add-on Option
SLA Response Time                | P3: 4 hours (24x5)    | P1: 30 minutes (24x7)
                                 | P4: 8 hours (24x5)    | P2: 2 hours (24x5)
                                 |                       | P3: 4 hours (24x5)
                                 |                       | P4: 8 hours (24x5)
    +

    A definitive description of SkySQL support can be found in the SkySQL Support Policy.

    +

    How to Request Support

    +

    Support cases are managed through the Support Portal, which is accessible to all registered users.

    +

    Users unable to reach the Support Portal can also use Support Email.

    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Uptime SLA/index.html b/Uptime SLA/index.html new file mode 100644 index 00000000..894a7dda --- /dev/null +++ b/Uptime SLA/index.html @@ -0,0 +1,2803 @@ + + + + + + + + + + + + + + + + + + + + + + + Uptime SLA - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Uptime SLA

    +

    SkySQL customers should assess the availability requirements of their application and choose an appropriate service tier to meet their objectives. SkySQL customers are on the Foundation Tier unless they have specifically purchased and paid for Power Tier service.

    +

    Performance Standard

    + + + + + + + + + + + + + + + + + +
Tier                   | Performance Standard
-----------------------|---------------------
SkySQL Foundation Tier | Multi-node configurations will deliver a 99.95% service availability on a per-billing-month basis. For example, with this availability target in a 30 day calendar month the maximum service downtime is 21 minutes and 54 seconds.
SkySQL Power Tier      | Multi-node configurations will deliver a 99.995% service availability on a per-billing-month basis. For example, with this availability target in a 30 day calendar month the maximum service downtime is 2 minutes and 11 seconds.
    +

    Service Downtime

    +

Service Downtime is measured at each SkySQL database endpoint as the total number of full minutes, outside of scheduled downtime for maintenance and upgrades, where continuous attempts to establish a connection within the minute fail as reflected in minute-by-minute logs.

    +

    Monthly Uptime Percentage

    +

Monthly Uptime Percentage is calculated on a per-billing-month basis as the total number of minutes in a month, minus the number of minutes of measured Service Downtime within the month, divided by the number of minutes in that month. When a service is deployed for only part of a month, it is assumed to be 100% available for the portion of the month that it is not deployed.
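As an illustrative example with hypothetical figures: a 30-day month contains 43,200 minutes, so 22 minutes of measured Service Downtime gives (43,200 - 22) / 43,200 ≈ 99.949% Monthly Uptime Percentage, which falls just below the Foundation Tier 99.95% target.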

    +

    Service Credit

    +

Service Credit is the percentage of the total fees paid by you for a given SkySQL service during the month in which the downtime occurred, to be credited if SkySQL approves your claim. The percentage used in calculating Service Credit depends on whether the customer is on the Foundation Tier or Power Tier, and on the calculated Monthly Uptime Percentage.

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Tier            | Monthly Uptime Percentage                              | Percentage Applied
----------------|--------------------------------------------------------|-------------------
Foundation Tier | Less than 99.95%, but greater than or equal to 99.0%   | 10%
Foundation Tier | Less than 99.0%                                        | 25%
Power Tier      | Less than 99.995%, but greater than or equal to 99.0%  | 10%
Power Tier      | Less than 99.0%                                        | 25%
    +

SkySQL will grant and process claims, provided the customer has satisfied its Customer Obligations and that none of the Exclusions listed apply to the claim. Service Credits will be issued only upon request within 60 days of the end of the billing period of the month of impact to service availability, and upon confirmation of outage. Service Credits will be issued in the form of a monetary credit applied to future use of the service that experienced the Service Downtime. Service Credits will not be applied to fees for any other SkySQL instance.

    +

The aggregate maximum number of Service Credits to be issued by SkySQL to customers for any and all Service Downtime that occurs in a single billing month will not exceed 50% of the amount due from the customer for the covered service for the applicable month.

    +

    Customer Obligations

    +

A customer will forfeit their right to receive a Service Credit unless they:

    +
      +
• Log a support ticket with SkySQL Support within 60 minutes of first becoming aware of an event that impacts service availability.
• Submit a claim and all required information by the end of the month immediately following the month when the Service Downtime occurred.
• Submit necessary information for SkySQL to validate the claim, including:
  • a description of the events resulting in the Service Downtime, and related request logs
  • the date, time, and duration of the Service Downtime
  • the number and location(s) of affected users
  • descriptions of customer attempts to fix the Service Downtime as it occurred
• Provide reasonable assistance to SkySQL in investigating the cause of the Service Downtime and investigating your claim.
    +

    Exclusions

    +
      +
    • +

      Out-of-scope configurations

      +

The Performance Standard does not apply to single-instance SkySQL service configurations or services in Technical Preview. Customers requiring High Availability should instead deploy a production-ready multi-node service configuration.

      +
    • +
    • +

      Underlying infrastructure

      +

      Impact to service availability caused by availability or performance of cloud services used to operate SkySQL is excluded. This includes any such outages in Amazon Web Services (AWS) and Amazon Elastic Kubernetes Service (EKS), and Google Cloud Platform (GCP) and Google Kubernetes Engine (GKE).

      +
    • +
    • +

      Network interruption

      +

      Impact to service availability caused by blocking of network traffic by ISPs, network providers, governments, or third parties is excluded.

      +
    • +
    • +

      External factors

      +

Impact to your use of the service based on factors outside SkySQL is excluded. This includes periods of downtime for your applications.

      +
    • +
    • +

      Uncorroborated impacts

      +

Only impacts to service availability detected at the point of measurement are considered when determining the uptime percentage. Service availability impacts measured through any other means, such as application instrumentation, are excluded except as also measured as Service Downtime by SkySQL.

      +
    • +
    • +

      Portal access

      +

      Impact to your ability to access or use the SkySQL portal, an interface provided to manage services, is excluded. This includes any component and content linked from the SkySQL portal, including Documentation, the Customer Support portal, Monitoring, and Workload Analysis. These components operate independently from database services and do not impact database availability.

      +
    • +
    • +

      Resource usage

      +

      Impact to service availability caused by usage of system resources, such as problems caused by excessive workload consumption of CPU, disk I/O, disk capacity, memory, and other system resources, are excluded.

      +
    • +
    • +

      Clients and connectors

      +

      Impact to service availability caused by the use of unsupported third-party clients and connectors is excluded.

      +
    • +
    • +

      Non-paying customers

      +

The Performance Standard applies only to paying SkySQL customers who are paid-in-full. All other SkySQL customers, including those not paid-in-full and those customers participating in a free or credited service trial, are excluded.

      +
    • +
    • +

      Customer-directed maintenance

      +

      When a customer directs that SkySQL conduct a maintenance operation on a service, any resulting impact to service availability is excluded.

      +
    • +
    • +

      Customer-approved maintenance

      +

      When a customer approves SkySQL-recommended maintenance on a service, any resulting impact to service availability is excluded.

      +
    • +
    • +

      Customer-initiated changes

      +

      When a customer initiates changes to their SkySQL services, e.g., via access to the database or via the SkySQL portal, any resulting impact to service availability is excluded.

      +
    • +
    • +

      Initial provisioning

      +

      Availability of services during initial provisioning, e.g., before a service becomes online, healthy, and available, is excluded.

      +
    • +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Using AWS GCP private VPC connections/Setting up AWS Private Link/index.html b/Using AWS GCP private VPC connections/Setting up AWS Private Link/index.html new file mode 100644 index 00000000..22dc730a --- /dev/null +++ b/Using AWS GCP private VPC connections/Setting up AWS Private Link/index.html @@ -0,0 +1,2957 @@ + + + + + + + + + + + + + + + + + + + + + + + Setting up AWS Private Link - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    Setting up AWS Private Link

    +

    AWS PrivateLink is an AWS service that enables secure and private connectivity between Virtual Private Clouds (VPCs) and third-party services. By using PrivateLink with SkySQL services, traffic does not traverse the public internet, which enhances security and reduces exposure to potential threats.

    +

For detailed information about AWS PrivateLink, see "AWS PrivateLink" (Amazon documentation).

    +

    Considerations

    +
      +
• AWS PrivateLink is used for private connections within the same AWS region. The SkySQL service and the connection VPC must be in the same region.
• When using SkySQL with AWS PrivateLink, all connections occur through private endpoints. If you need to connect to the service from outside your VPC, you will need to use a VPN or other mechanism to go through the connected VPC. Alternatively, SkySQL can be configured to provide a second, public endpoint for an additional fee.
• A list of AWS Account IDs that will be allowed to connect to the SkySQL service must be provided when enabling AWS PrivateLink. This list can be updated at any time.
• The SkySQL IP Allowlist is not used with AWS PrivateLink connections. Access to the SkySQL service will be controlled by Security Groups in the connecting VPC. For detailed information, see "Control traffic to resources using security groups" (Amazon documentation).
• Connections to SkySQL services by features such as SkySQL backups and monitoring do not depend on AWS PrivateLink.
• Query Editor is not supported when AWS PrivateLink is enabled.
    + +
    +Enable Privatelink via the SkySQL Portal +
To enable AWS PrivateLink when launching a new service via the SkySQL Portal, select the 'Enable Private link' option in the 'Security' section.
After the service completes provisioning, you will see a new option to "Set up Private Link" in the service's context menu. Click this option to add one or more AWS account IDs to the allowlist.
    + +
    +Enable Privatelink via the SkySQL DBaaS API +
To enable AWS PrivateLink when launching a new service via the SkySQL DBaaS API, add the `endpoint_mechanism` and `endpoint_allowed_accounts` attributes to the service creation JSON payload.
    {
    +  "name": "my-skysql-service",
    +  ...
    +  "endpoint_mechanism": "privateconnect",
    +  "allowed_accounts": [
    +    "AWS-ACCOUNT-ID-1",
    +    "AWS-ACCOUNT-ID-2"
    +  ]
    +}
    +
    +- The `endpoint_mechanism` field must be set to `privateconnect` +- The `endpoint_allowed_accounts` field must be set to a JSON array of one or more customer account IDs in AWS that will be allowed to establish a private connection to the SkySQL service. + +For more information on using the SkySQL DBaaS API, seeĀ ["SkySQL DBaaS API"](https://apidocs.skysql.com/#/Services/post_provisioning_v1_services). +
    + +
    +Enable Privatelink via the SkySQL Terraform Provider +
To enable AWS PrivateLink when launching a new service via the SkySQL Terraform Provider, set the `endpoint_mechanism` and `endpoint_allowed_accounts` attributes on the `skysql_service` resource.
    resource "skysql_service" "example" {
    +  name                      = "my-skysql-service"
    +  ...
    +  endpoint_mechanism        = "privateconnect"
    +  endpoint_allowed_accounts = ["123456789012"]
    +}
    +
    + +- The `endpoint_mechanism` field must be set to `privateconnect` +- The `endpoint_allowed_accounts` field must be set to a list of one or more customer account IDs in AWS that will be allowed to establish a private connection to the SkySQL service. + +A complete example Terraform template that creates a new SkySQL service with AWS PrivateLink enabled can be found in theĀ [terraform provider examples](https://github.com/skysqlinc/terraform-provider-skysql/tree/main/examples/privateconnect). + + +For more information on using the SkySQL Terraform Provider, seeĀ ["SkySQL Terraform Provider"](https://registry.terraform.io/providers/skysqlinc/skysql/latest/docs). + +
    + +

For the next step, see the AWS Endpoint Setup section on this page.

    + +
    +

    [!CAUTION] +Enabling PrivateLink on an existing service will cause all existing connections to be dropped. The service will be unavailable for a short period of time while the public endpoint is replaced with the new PrivateLink endpoint.

    +
    +
    +Enable AWS PrivateLink on an existing service via the SkySQL Portal: +
    + +1. Log in to theĀ SkySQL Portal +2. Click the "MANAGE" button (on the right) for the desired service. +3. In the context menu, choose the "Set up AWS PrivateLink" menu item. +4. In the popup window, add one or more AWS account IDs. +5. Click the "OK" button to confirm this operation. + +
    + +
    +Enable AWS PrivateLink on an existing service via the SkySQL DBaaS API: +
    + +To enable AWS PrivateLink on an existing service, you will need to update the service endpoints with a payload similar to the following: + +
    [
    +  {
    +    "mechanism": "privateconnect",
    +    "allowed_accounts": [
    +      "AWS-ACCOUNT-ID-1",
    +      "AWS-ACCOUNT-ID-2"
    +    ]
    +  }
    +]
    +
    + +This payload should then be sent to the API `PATCH` https://api.skysql.com/provisioning/v1/services/{SERVICE_ID}/endpoints where `{SERVICE_ID}` is the ID of the service you are updating. +For more information on using the SkySQL DBaaS API, seeĀ ["SkySQL DBaaS API"](https://apidocs.skysql.com/#/Services/patch_provisioning_v1_services__service_id__endpoints). + +
    + +

For the next step, see the AWS Endpoint Setup section on this page.

    +

    AWS Endpoint Setup

    +

    To connect to a SkySQL service using AWS PrivateLink, you must create an endpoint in your VPC that connects to the SkySQL service. The endpoint will be used by clients in your VPC to connect to the SkySQL service.

    +

    Pre-requisites

    +
      +
• You must have a VPC in the same region as the SkySQL service.
• You must create a security group that will be used to control access to the SkySQL service endpoint.
  • This security group should contain rules to allow traffic from your client instances to the port of the SkySQL service (usually 3306). A sketch of one such rule follows this list.
  • You must create a rule in your security group for each IP range or other security group that will be allowed to connect to the SkySQL service.
  • The security group must be associated with the VPC that you will use to connect to the SkySQL service.
• You will need to look up the Endpoint Service ID that SkySQL provisioned for you when you created your SkySQL service.
  • This ID can be found in the "Connect" window of the SkySQL portal.
  • If using the SkySQL DBaaS API, the ID can be found in the response of the service details API call:

    curl https://api.skysql.com/provisioning/v1/services/{SERVICE_ID} | jq ".endpoints[0].endpoint_service"
    +
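For example, an ingress rule could be added to the security group with the AWS CLI roughly as follows; the group ID and CIDR range are placeholders, and the SkySQL connection port on your service may differ from 3306:

    # Allow clients in 10.0.0.0/16 to reach the SkySQL endpoint port through this security group
    aws ec2 authorize-security-group-ingress \
        --group-id sg-0123456789abcdef0 \
        --protocol tcp \
        --port 3306 \
        --cidr 10.0.0.0/16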

    VPC Endpoint Creation Steps

    +
      +
1. Log in to the AWS console.
2. Confirm the correct region is selected.
3. Navigate to the "VPC" page, then the "Endpoints" section.
4. Click the "Create Endpoint" button.
5. In the "Name tag" field, enter a name for the new endpoint. This name can be anything you like.
6. Set the Service category to "Other endpoint services".
7. The value for the "Service name" field must be set to the value of the Endpoint Service ID provided to you by SkySQL. See Pre-requisites for more information on how to find this ID.
8. Click "Verify service". AWS should find the service and auto-populate the rest of the form.
9. In the VPC search field, find the VPC that you want to use for the interconnect between the clients and the SkySQL service.
10. In the Subnets section, it is suggested that you select all the Availability Zones in the list, entering the proper subnet ID for each one. If you are unsure, view the details of your running instances to see the Subnet ID that they have configured.
11. Select IPv4 for "IP address type".
12. For the "Security Groups" section, assign the security groups that will allow your client instance to connect to your VPC endpoint. See Pre-requisites for more information on setting up security groups.
13. Press the "Create endpoint" button. Endpoint creation may take several minutes. When complete, status will change from "Pending" to "Available".
    +

After creation, the Endpoint will be in Pending status while AWS provisions the new endpoint. Once the endpoint is Available, you can connect to your SkySQL service using the new endpoint.

The newly created endpoint now authorizes the internal IPs or security groups that you specified in the Source values to access the SkySQL service's connection port. When testing a client connection, ensure that the client host is authorized by the security group's Source settings and that you're using the "readwrite" port plus the appropriate username and password (either the default values or the value for any user you have created).

    +

    Connecting to your SkySQL Service

    +

After creating your VPC endpoint, AWS will create a number of DNS records that will resolve to the private IP addresses of your PrivateLink endpoint.

- The first DNS name in the list can be used from any availability zone in your VPC and will resolve to the private IP address of the endpoint in the same availability zone.
- The following DNS names provided are availability zone specific and rely on the user to match the correct DNS name to the availability zone of the client instance.
- If connecting via these DNS names, we recommend using the first DNS name in the list to ensure that the connection is routed to the correct availability zone.

    +
    +

    [!NOTE] +The DNS names provided by AWS will always be in the domain amazonaws.com. If connecting to your SkySQL service using SSL/TLS, the database certificate will not match the VPC endpoint name. Due to this, we recommend Enabling Private DNS for AWS PrivateLink.

    +
    + +

    In order to connect to your SkySQL service using the skysql.com service name provided in the SkySQL portal, you must enable Private DNS for the VPC endpoint. This will allow the service name to resolve to the private IP address of the SkySQL service.

    +

The following requirements must be met to enable Private DNS for the VPC endpoint:

- Private DNS must be enabled for the VPC.
- Your client instances must use the default AWS DNS server provided by the VPC (this is usually turned on by default).

    +

To enable Private DNS for the VPC endpoint:

1. Log in to the AWS console.
2. Confirm the correct region is selected.
3. Navigate to the "VPC" page, then the "Endpoints" section.
4. Select the VPC endpoint that you created for your SkySQL service.
5. Click the "Actions" button, then select "Modify Private DNS Name".
6. In the popup window, select the checkbox to "Enable Private DNS Name".
7. Click the "Save changes" button.

    +

    After a short period of time, the service name provided in the SkySQL portal should resolve to the private IP address of the SkySQL service via PrivateLink. You can test this by connecting to the service using the service name provided in the portal.
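One simple check, from a client inside the connected VPC, is to confirm that the service hostname now resolves to a private address; the hostname below is a placeholder for the name shown in the SkySQL portal:

    # Should return a private address (e.g. 10.x.x.x, 172.16-31.x.x, or 192.168.x.x) once Private DNS is active
    nslookup YOUR-SERVICE-NAME.skysql.com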

    + +
    +

    [!CAUTION] +Disabling PrivateLink on an existing service will cause all existing connections to be dropped. The service will be unavailable for a short period of time while the private endpoint is replaced with the new public endpoint.

    +
    +
    +Disable AWS PrivateLink via the SkySQL Portal +
    + +1. Visit theĀ [SkySQL Portal](https://app.skysql.com/) +2. Find the service that you would like to modify. +3. Click "MANAGE" on the far right side of the service listing. +4. In the context menu, select "Manage PrivateLink". +5. In the popup window, click "I want to disconnect my Private Link". +6. In the popup window, select "Disconnect". +7. Since the service's allowlist was cleared when AWS PrivateLink was previously enabled, you will need toĀ [update the allowlist](../Security/Configuring%20Firewall.md) to allow clients to connect after disabling PrivateLink. + +
    + +
    +Disable AWS PrivateLink via the SkySQL DBaaS API +
    + +To disable AWS PrivateLink on an existing service, you will need to update the service endpoints with a payload similar to the following: + +
    [
    +  {
    +    "mechanism": "nlb"
    +  }
    +]
    +
    + +This payload should then be sent to the API `PATCH` https://api.skysql.com/provisioning/v1/services/{SERVICE_ID}/endpoints where `{SERVICE_ID}` is the ID of the service you are updating. +For more information on using the SkySQL DBaaS API, seeĀ ["SkySQL DBaaS API"](https://apidocs.skysql.com/#/Services/patch_provisioning_v1_services__service_id__endpoints). + +
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Using AWS GCP private VPC connections/Setting up GCP Private Service Connect/index.html b/Using AWS GCP private VPC connections/Setting up GCP Private Service Connect/index.html new file mode 100644 index 00000000..96a244b8 --- /dev/null +++ b/Using AWS GCP private VPC connections/Setting up GCP Private Service Connect/index.html @@ -0,0 +1,2989 @@ + + + + + + + + + + + + + + + + + + + + + + + Setting up GCP Private Service Connect - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + + + + + +
    +
    + + + + +

    Setting up GCP Private Service Connect

    +

    Google Private Service Connect (PSC) is a Google Cloud service that enables secure and private connectivity between Virtual Private Clouds (VPCs) and third-party services. By using PSC with SkySQL services, traffic does not traverse the public internet, which enhances security and reduces exposure to potential threats.

    +

    For detailed information about Google PSC, see "Private Service Connect" (Google documentation).

    +

    Considerations

    +
      +
• PSC is used for private connections within the same Google Cloud region. The SkySQL service and the connection VPC must be in the same region.
• When using SkySQL with PSC, all connections occur through private endpoints. If you need to connect to the service from outside your VPC, you will need to use a VPN or other mechanism to go through the connected VPC. Alternatively, SkySQL can be configured to provide a second, public endpoint for an additional fee.
• A list of Google Cloud project IDs that will be allowed to connect to the SkySQL service must be provided when enabling PSC. This list can be updated at any time.
• The SkySQL IP Allowlist is not used with PSC connections. Access to the SkySQL service can be controlled by setting up firewall rules inside the connecting VPC.
• Connections to SkySQL services by features such as SkySQL backups and monitoring do not depend on PSC.
• Query Editor is not supported when PSC is enabled.
• PSC has connection limits, which refer to the number of endpoints that can be created to a single PSC service within Google Cloud. Database connection limits are independent from PSC connection limits. The limit for PSC connections is 10.
    +

    Enable Private Service Connect on Service Launch

    +
    +Enable Google PSC via the SkySQL Portal +
To enable PSC when launching a new service via the SkySQL Portal, select the 'Google Private Service Connect' option in the 'Security' section.
After the service completes provisioning, you will see a new option to "Manage Google Private Service Connect" in the service's context menu. Click this option to add one or more Google project IDs to the allowlist.
    + +
    +Enable Google PSC via the SkySQL DBaaS API +
To enable Google PSC when launching a new service via the SkySQL DBaaS API, add the `endpoint_mechanism` and `endpoint_allowed_accounts` attributes to the service creation JSON payload:
    {
    +  "name": "my-skysql-service",
    +  ...
    +  "endpoint_mechanism": "privateconnect",
+  "endpoint_allowed_accounts": [
    +    "GCP-PROJECT-ID-1",
    +    "GCP-PROJECT-ID-2"
    +  ]
    +}
    +
- The `endpoint_mechanism` field must be set to `privateconnect`.
- The `endpoint_allowed_accounts` field must be set to a JSON array of one or more client project IDs in Google Cloud that will be allowed to establish a private connection to the SkySQL service.

For more information on using the SkySQL DBaaS API, see ["SkySQL DBaaS API"](https://apidocs.skysql.com/#/Services/post_provisioning_v1_services).
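As an illustration, the payload could be submitted with a plain `curl` call. This is only a minimal sketch: the file name `service.json`, the remaining required service attributes (elided as `...` above), and the authentication header are placeholders you must fill in from the SkySQL DBaaS API documentation.

```bash
# Hypothetical sketch: create a service with Google PSC enabled via the
# SkySQL DBaaS API. Assumes the full service-creation payload (including
# the PSC attributes shown above) is saved in service.json.
# Add the authentication header required by your SkySQL account, as
# described in the SkySQL DBaaS API documentation.
curl --request POST \
     --header "Content-Type: application/json" \
     --data @service.json \
     "https://api.skysql.com/provisioning/v1/services"
```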
    + +
    +Enable Google PSC via the SkySQL Terraform Provider +
To enable Google PSC when launching a new service via the SkySQL Terraform Provider, set the `endpoint_mechanism` and `endpoint_allowed_accounts` attributes on the `skysql_service` resource:
    resource "skysql_service" "example" {
    +  name                      = "my-skysql-service"
    +  ...
    +  endpoint_mechanism        = "privateconnect"
    +  endpoint_allowed_accounts = ["GCP-PROJECT-ID-1", "GCP-PROJECT-ID-2"]
    +}
    +
- The `endpoint_mechanism` field must be set to `privateconnect`.
- The `endpoint_allowed_accounts` field must be set to a list of one or more client project IDs in Google Cloud that will be allowed to establish a private connection to the SkySQL service.

A complete example Terraform template that creates a new SkySQL service with Google PSC enabled can be found in the [terraform provider examples](https://github.com/skysqlinc/terraform-provider-skysql/tree/main/examples/private-service-connect).

For more information on using the SkySQL Terraform Provider, see ["SkySQL Terraform Provider"](https://registry.terraform.io/providers/skysqlinc/skysql/latest/docs).
    + +

For the next step, see the "Private Service Connect Endpoint Setup" section on this page.

    +

    Enable Google PSC on an Existing SkySQL Service

    +
    +

[!CAUTION]
Enabling PSC on an existing service will cause all existing connections to be dropped. The service will be unavailable for a short period of time while the public endpoint is replaced with the new PSC endpoint.

    +
    +
    +Enable Google PSC on an existing service via the SkySQL Portal: +
1. Log in to the SkySQL Portal.
2. Click the "MANAGE" button (on the right) for the desired service.
3. In the context menu, choose the "Set up Google Private Service Connect" menu item.
4. In the popup window, add one or more GCP project IDs.
5. Click the "OK" button to confirm this operation.
    + +
    +Enable Google PSC on an existing service via the SkySQL DBaaS API: +
    + +To enable Google PSC on an existing service, you will need to update the service endpoints with a payload similar to the following: + +
    [
    +  {
    +    "mechanism": "privateconnect",
    +    "allowed_accounts": [
    +      "GOOGLE-PROJECT-ID-1",
    +      "GOOGLE-PROJECT-ID-2"
    +    ]
    +  }
    +]
    +
This payload should then be sent in a `PATCH` request to `https://api.skysql.com/provisioning/v1/services/{SERVICE_ID}/endpoints`, where `{SERVICE_ID}` is the ID of the service you are updating.
For more information on using the SkySQL DBaaS API, see ["SkySQL DBaaS API"](https://apidocs.skysql.com/#/Services/patch_provisioning_v1_services__service_id__endpoints).
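For example, a minimal `curl` sketch of that `PATCH` call, assuming the payload above is saved in `endpoints.json` and that the authentication header required by your account (see the API documentation) is added:

```bash
# Hypothetical sketch: switch an existing service's endpoint to Google PSC.
# endpoints.json contains the JSON array shown above; SERVICE_ID and the
# authentication header are placeholders.
SERVICE_ID="YOUR-SERVICE-ID"
curl --request PATCH \
     --header "Content-Type: application/json" \
     --data @endpoints.json \
     "https://api.skysql.com/provisioning/v1/services/${SERVICE_ID}/endpoints"
```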
    + +

For the next step, see the "Private Service Connect Endpoint Setup" section on this page.

    +

    Private Service Connect Endpoint Setup

    +

To connect to a SkySQL service using Google PSC, you must create an endpoint in your VPC that points to the SkySQL service. Clients in your VPC then use this endpoint to connect.

    +

    Pre-requisites

    +
      +
    • You must have a VPC in the same region as the SkySQL service.
    • +
• You will need to look up the Endpoint Service ID that SkySQL provisioned for you when you created your SkySQL service.
        +
      • This ID can be found in the "Connect" window of the SkySQL portal.
      • +
      • If using the SkySQL DBaaS API, the ID can be found in the response of the service details API call. +
        curl https://api.skysql.com/provisioning/v1/services/{SERVICE_ID} | jq ".endpoints[0].endpoint_service"
        +
      • +
      +
    • +
    +

    Create a Subnet (optional)

    +

We recommend using a subnet dedicated to Private Service Connect endpoints in the same VPC where the application is running. An equivalent `gcloud` sketch is shown after the console steps below.

    +
      +
1. In the GCP console, navigate to VPC network → VPC networks → [VPC name] → SUBNETS → ADD SUBNET.
    • Replace [VPC name] with the name of the VPC where the application is running.
2. Configure the subnet:
    • Name
    • Region: select the same region as the one where the application runs
    • Purpose: None
    • IP address range: set a CIDR block that doesn't overlap with the CIDR blocks of the existing subnets in the same VPC.
    • Optionally configure Private Google Access.
    • Optionally configure Flow logs.
    • Click "ADD".
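If you prefer the CLI, the same subnet can be created with `gcloud`. This is a sketch under assumed names and values (`my-vpc`, `psc-subnet`, `us-central1`, and `10.10.0.0/24` are placeholders); the console steps above remain the reference.

```bash
# Hypothetical sketch: dedicated subnet for Private Service Connect endpoints.
# VPC name, subnet name, region, and CIDR range are placeholders -- choose a
# range that does not overlap existing subnets in the VPC.
gcloud compute networks subnets create psc-subnet \
    --network=my-vpc \
    --region=us-central1 \
    --range=10.10.0.0/24
```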

    Create a Static Internal IP Address

    +
      +
1. In the GCP console, navigate to VPC network → VPC networks → [VPC name] → STATIC INTERNAL IP ADDRESSES → RESERVE STATIC ADDRESS.
    • Replace [VPC name] with the name of the VPC where the application is running.
2. Configure the static internal IP address:
    • Name: set to the Database ID (dbxxxxxxxx) from SkySQL.
    • Subnet: select the subnet in which to reserve the static IP address.
    • Static IP address: optionally choose the address.
    • Purpose: Non-shared
    • Click "RESERVE".
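The equivalent `gcloud` command might look like the sketch below; the region, subnet, and IP address are placeholders, and the address name follows the console guidance of using the SkySQL Database ID.

```bash
# Hypothetical sketch: reserve a static internal IP for the PSC endpoint.
# Name it after the SkySQL Database ID (dbxxxxxxxx). Region and subnet must
# match the subnet created above; omit --addresses to let GCP pick an
# address from the subnet range.
gcloud compute addresses create dbxxxxxxxx \
    --region=us-central1 \
    --subnet=psc-subnet \
    --addresses=10.10.0.5
```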

    VPC Endpoint Creation Steps

    +
      +
1. In the GCP console, navigate to Network services → Private Service Connect → CONNECTED ENDPOINTS → CONNECT ENDPOINT.
2. Configure the endpoint connection:
    • Target: Published service
    • Target service: the value of the Endpoint Service ID. See Pre-requisites for more information on how to find this ID.
    • Endpoint name: set to the Database ID from SkySQL (dbxxxxxxxx)
    • Network: select the VPC network where the application is running
    • Subnetwork: select the subnet where the static internal IP address is reserved
    • IP address: select the reserved internal IP address from the prior step
    • Click "ADD ENDPOINT".
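The console flow above can also be approximated with `gcloud`, assuming the Endpoint Service ID provided by SkySQL is a full service attachment URI (verify the exact value in the "Connect" window or the API response). All names, the region, and the URI below are placeholders.

```bash
# Hypothetical sketch: create the PSC endpoint as a forwarding rule that
# targets the SkySQL service attachment (Endpoint Service ID).
gcloud compute forwarding-rules create dbxxxxxxxx \
    --region=us-central1 \
    --network=my-vpc \
    --address=dbxxxxxxxx \
    --target-service-attachment="projects/EXAMPLE-PROJECT/regions/us-central1/serviceAttachments/EXAMPLE-ATTACHMENT"

# Check that the connection was accepted; this should print ACCEPTED once
# your Google project ID is on the SkySQL allowlist for this service.
gcloud compute forwarding-rules describe dbxxxxxxxx \
    --region=us-central1 \
    --format="value(pscConnectionStatus)"
```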

    After creation, the Endpoint should have a status of Accepted. If this status is not present, please ensure your Google Project ID is added to the list of allowed accounts in the SkySQL portal for this service.

    +

    Connecting to your SkySQL Service

    +

After creating your PSC endpoint, your service should be available within your VPC at the private IP address you assigned to the endpoint.
- DNS propagation from SkySQL to the private IP address is not supported when using PSC.
- The hostname when connecting to your SkySQL service should always be the private IP address of the PSC endpoint.

    +
    +

[!NOTE]
When using PSC with SSL/TLS, there will be a hostname mismatch since the hostname provisioned by SkySQL will not match your internal IP address. This can be ignored as the connection is still secure.

    +
    +
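For instance, a client connection from inside the VPC might look like the sketch below. The IP address, port, user, and CA file are placeholders; substitute the values shown in your service's "Connect" window.

```bash
# Hypothetical sketch: connect through the PSC endpoint's private IP instead
# of the SkySQL hostname. IP, port, user, and CA file are placeholders.
mariadb --host 10.10.0.5 --port 3306 \
        --user YOUR_DB_USER --password \
        --ssl-ca ~/skysql_chain.pem
# Strict hostname verification (--ssl-verify-server-cert) will report a
# mismatch here, because the server certificate names the SkySQL hostname
# rather than the private IP.
```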

    Disabling Google PSC

    +
    +

[!CAUTION]
Disabling PSC on an existing service will cause all existing connections to be dropped. The service will be unavailable for a short period of time while the private endpoint is replaced with the new public endpoint.

    +
    +
    +Disable Google PSC via the SkySQL Portal +
1. Visit the [SkySQL Portal](https://app.skysql.com/).
2. Find the service that you would like to modify.
3. Click "MANAGE" on the far right side of the service listing.
4. In the context menu, select "Manage your Private Service Connect".
5. In the popup window, click "I want to disconnect my Private Service Connect".
6. In the popup window, select "Disconnect".
7. Since the service's allowlist was cleared when Google PSC was previously enabled, you will need to [update the allowlist](../Security/Configuring%20Firewall.md) to allow clients to connect after disabling PSC.
    + +
    +Disable Google PSC via the SkySQL DBaaS API +
    + +To disable Google PSC on an existing service, you will need to update the service endpoints with a payload similar to the following: + +
    [
    +  {
    +    "mechanism": "nlb"
    +  }
    +]
    +
This payload should then be sent in a `PATCH` request to `https://api.skysql.com/provisioning/v1/services/{SERVICE_ID}/endpoints`, where `{SERVICE_ID}` is the ID of the service you are updating.
For more information on using the SkySQL DBaaS API, see ["SkySQL DBaaS API"](https://apidocs.skysql.com/#/Services/patch_provisioning_v1_services__service_id__endpoints).
    + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/Using AWS GCP private VPC connections/index.html b/Using AWS GCP private VPC connections/index.html new file mode 100644 index 00000000..e60fbce4 --- /dev/null +++ b/Using AWS GCP private VPC connections/index.html @@ -0,0 +1,2526 @@ + + + + + + + + + + + + + + + + + + + + + + + Using AWS/GCP private VPC connections - SkySQL Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + Skip to content + + +
    +
    + +
    + + + + + + +
    + + +
    + +
    + + + + + + +
    +
    + + + +
    +
    +
    + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + +

    Using AWS/GCP private VPC connections

    +

    By default, connections to SkySQL cloud databases occur with TLS/SSL encryption and can be initiated only from allowlisted IP addresses.

    +

Some customers have regulatory requirements or information security policies that prohibit database connections over the public internet, which results in a requirement for private connections.

    +

SkySQL cloud databases can optionally be configured for private connections between your VPCs (virtual private clouds) and SkySQL:

    + + + + + + + + + + + + + + +
    +
    + + + + + +
    + +
    + + + +
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/architecture.png b/architecture.png new file mode 100644 index 00000000..9be4f492 Binary files /dev/null and b/architecture.png differ diff --git a/assets/favicon.png b/assets/favicon.png new file mode 100644 index 00000000..330f2ee6 Binary files /dev/null and b/assets/favicon.png differ diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 00000000..1cf13b9f Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.56dfad97.min.js b/assets/javascripts/bundle.56dfad97.min.js new file mode 100644 index 00000000..1df62cd7 --- /dev/null +++ b/assets/javascripts/bundle.56dfad97.min.js @@ -0,0 +1,16 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var Wi=Object.getOwnPropertyDescriptor;var Ui=Object.getOwnPropertyNames,Vt=Object.getOwnPropertySymbols,Di=Object.getPrototypeOf,yr=Object.prototype.hasOwnProperty,io=Object.prototype.propertyIsEnumerable;var no=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,$=(e,t)=>{for(var r in t||(t={}))yr.call(t,r)&&no(e,r,t[r]);if(Vt)for(var r of Vt(t))io.call(t,r)&&no(e,r,t[r]);return e};var ao=(e,t)=>{var r={};for(var o in e)yr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Vt)for(var o of Vt(e))t.indexOf(o)<0&&io.call(e,o)&&(r[o]=e[o]);return r};var xr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Vi=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Ui(t))!yr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=Wi(t,n))||o.enumerable});return e};var Lt=(e,t,r)=>(r=e!=null?Fi(Di(e)):{},Vi(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var so=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var po=xr((Er,co)=>{(function(e,t){typeof Er=="object"&&typeof co!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(k){return!!(k&&k!==document&&k.nodeName!=="HTML"&&k.nodeName!=="BODY"&&"classList"in k&&"contains"in k.classList)}function p(k){var ft=k.type,qe=k.tagName;return!!(qe==="INPUT"&&a[ft]&&!k.readOnly||qe==="TEXTAREA"&&!k.readOnly||k.isContentEditable)}function c(k){k.classList.contains("focus-visible")||(k.classList.add("focus-visible"),k.setAttribute("data-focus-visible-added",""))}function l(k){k.hasAttribute("data-focus-visible-added")&&(k.classList.remove("focus-visible"),k.removeAttribute("data-focus-visible-added"))}function f(k){k.metaKey||k.altKey||k.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(k){o=!1}function d(k){s(k.target)&&(o||p(k.target))&&c(k.target)}function y(k){s(k.target)&&(k.target.classList.contains("focus-visible")||k.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(k.target))}function M(k){document.visibilityState==="hidden"&&(n&&(o=!0),X())}function 
X(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function te(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(k){k.target.nodeName&&k.target.nodeName.toLowerCase()==="html"||(o=!1,te())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",M,!0),X(),r.addEventListener("focus",d,!0),r.addEventListener("blur",y,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var qr=xr((ly,Sn)=>{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ka=/["'&<>]/;Sn.exports=Ha;function Ha(e){var t=""+e,r=ka.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT Ā© Zeno Rocha + */(function(t,r){typeof It=="object"&&typeof Yr=="object"?Yr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof It=="object"?It.ClipboardJS=r():t.ClipboardJS=r()})(It,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return ji}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(A){return!1}}var d=function(A){var L=f()(A);return u("cut"),L},y=d;function M(V){var A=document.documentElement.getAttribute("dir")==="rtl",L=document.createElement("textarea");L.style.fontSize="12pt",L.style.border="0",L.style.padding="0",L.style.margin="0",L.style.position="absolute",L.style[A?"right":"left"]="-9999px";var F=window.pageYOffset||document.documentElement.scrollTop;return L.style.top="".concat(F,"px"),L.setAttribute("readonly",""),L.value=V,L}var X=function(A,L){var F=M(A);L.container.appendChild(F);var D=f()(F);return u("copy"),F.remove(),D},te=function(A){var L=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},F="";return typeof A=="string"?F=X(A,L):A instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(A==null?void 0:A.type)?F=X(A.value,L):(F=f()(A),u("copy")),F},J=te;function k(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?k=function(L){return typeof L}:k=function(L){return L&&typeof Symbol=="function"&&L.constructor===Symbol&&L!==Symbol.prototype?"symbol":typeof L},k(V)}var ft=function(){var A=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},L=A.action,F=L===void 0?"copy":L,D=A.container,Y=A.target,$e=A.text;if(F!=="copy"&&F!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&k(Y)==="object"&&Y.nodeType===1){if(F==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(F==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if($e)return J($e,{container:D});if(Y)return F==="cut"?y(Y):J(Y,{container:D})},qe=ft;function Fe(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Fe=function(L){return typeof L}:Fe=function(L){return L&&typeof Symbol=="function"&&L.constructor===Symbol&&L!==Symbol.prototype?"symbol":typeof L},Fe(V)}function Ai(V,A){if(!(V instanceof A))throw new TypeError("Cannot call a class as a function")}function oo(V,A){for(var L=0;L0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Fe(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function($e){return Y.onClick($e)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,$e=this.action(Y)||"copy",Dt=qe({action:$e,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Dt?"success":"error",{action:$e,text:Dt,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return y(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,$e=!!document.queryCommandSupported;return Y.forEach(function(Dt){$e=$e&&!!document.queryCommandSupported(Dt)}),$e}}]),L}(s()),ji=Ii},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,d,y){var M=c.apply(this,arguments);return l.addEventListener(u,M,y),{destroy:function(){l.removeEventListener(u,M,y)}}}function p(l,f,u,d,y){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(M){return s(M,f,u,d,y)}))}function c(l,f,u,d){return function(y){y.delegateTarget=a(y.target,f),y.delegateTarget&&d.call(l,y)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,d,y){if(!u&&!d&&!y)throw new Error("Missing required 
arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(y))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,d,y);if(a.nodeList(u))return l(u,d,y);if(a.string(u))return f(u,d,y);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,d,y){return u.addEventListener(d,y),{destroy:function(){u.removeEventListener(d,y)}}}function l(u,d,y){return Array.prototype.forEach.call(u,function(M){M.addEventListener(d,y)}),{destroy:function(){Array.prototype.forEach.call(u,function(M){M.removeEventListener(d,y)})}}}function f(u,d,y){return s(document.body,u,d,y)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||p(d,M)})},y&&(n[d]=y(n[d])))}function p(d,y){try{c(o[d](y))}catch(M){u(i[0][3],M)}}function c(d){d.value instanceof nt?Promise.resolve(d.value.v).then(l,f):u(i[0][2],d)}function l(d){p("next",d)}function f(d){p("throw",d)}function u(d,y){d(y),i.shift(),i.length&&p(i[0][0],i[0][1])}}function fo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof he=="function"?he(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function H(e){return typeof e=="function"}function ut(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ut(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function Qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var We=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var 
t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=he(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(M){t={error:M}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(H(l))try{l()}catch(M){i=M instanceof zt?M.errors:[M]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=he(f),d=u.next();!d.done;d=u.next()){var y=d.value;try{uo(y)}catch(M){i=i!=null?i:[],M instanceof zt?i=q(q([],N(i)),N(M.errors)):i.push(M)}}}catch(M){o={error:M}}finally{try{d&&!d.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)uo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=We.EMPTY;function qt(e){return e instanceof We||e&&"closed"in e&&H(e.remove)&&H(e.add)&&H(e.unsubscribe)}function uo(e){H(e)?e():e.unsubscribe()}var Pe={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var dt={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new We(function(){o.currentObservers=null,Qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new j;return r.source=this,r},t.create=function(r,o){return new wo(r,o)},t}(j);var wo=function(e){oe(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){oe(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var 
r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var At={now:function(){return(At.delegate||Date).now()},delegate:void 0};var Ct=function(e){oe(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=At);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(gt);var Oo=function(e){oe(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(yt);var kr=new Oo(So);var Mo=function(e){oe(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=vt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(vt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(gt);var Lo=function(e){oe(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(yt);var me=new Lo(Mo);var S=new j(function(e){return e.complete()});function Yt(e){return e&&H(e.schedule)}function Hr(e){return e[e.length-1]}function Xe(e){return H(Hr(e))?e.pop():void 0}function ke(e){return Yt(Hr(e))?e.pop():void 0}function Bt(e,t){return typeof Hr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return H(e==null?void 0:e.then)}function Jt(e){return H(e[bt])}function Xt(e){return Symbol.asyncIterator&&H(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Ji(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Ji();function tr(e){return H(e==null?void 0:e[er])}function rr(e){return mo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return H(e==null?void 0:e.getReader)}function W(e){if(e instanceof j)return e;if(e!=null){if(Jt(e))return Xi(e);if(xt(e))return Zi(e);if(Gt(e))return ea(e);if(Xt(e))return _o(e);if(tr(e))return ta(e);if(or(e))return ra(e)}throw Zt(e)}function Xi(e){return new j(function(t){var r=e[bt]();if(H(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Zi(e){return new j(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?De(t):qo(function(){return new ir}))}}function jr(e){return e<=0?function(){return S}:E(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,d=0,y=!1,M=!1,X=function(){f==null||f.unsubscribe(),f=void 0},te=function(){X(),l=u=void 0,y=M=!1},J=function(){var k=l;te(),k==null||k.unsubscribe()};return E(function(k,ft){d++,!M&&!y&&X();var qe=u=u!=null?u:r();ft.add(function(){d--,d===0&&!M&&!y&&(f=Wr(J,p))}),qe.subscribe(ft),!l&&d>0&&(l=new at({next:function(Fe){return qe.next(Fe)},error:function(Fe){M=!0,X(),f=Wr(te,n,Fe),qe.error(Fe)},complete:function(){y=!0,X(),f=Wr(te,a),qe.complete()}}),W(k).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function P(e,t=document){return Array.from(t.querySelectorAll(e))}function R(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Ie(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=O(h(document.body,"focusin"),h(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Ie()||document.body),G(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function $t(e,t){return C(()=>O(h(e,"mouseenter").pipe(m(()=>!0)),h(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Go(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Go(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Go(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function Tt(e){let t=x("script",{src:e});return 
C(()=>(document.head.appendChild(t),O(h(t,"load"),h(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),_(()=>document.head.removeChild(t)),Te(1))))}var Jo=new g,Ea=C(()=>typeof ResizeObserver=="undefined"?Tt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Jo.next(t)))),v(e=>O(Ye,I(e)).pipe(_(()=>e.disconnect()))),G(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return Ea.pipe(w(r=>r.observe(t)),v(r=>Jo.pipe(b(o=>o.target===t),_(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function St(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Xo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ve(e){return{x:e.offsetLeft,y:e.offsetTop}}function Zo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function en(e){return O(h(window,"load"),h(window,"resize")).pipe(Le(0,me),m(()=>Ve(e)),Q(Ve(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function Ne(e){return O(h(e,"scroll"),h(window,"scroll"),h(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var tn=new g,wa=C(()=>I(new IntersectionObserver(e=>{for(let t of e)tn.next(t)},{threshold:0}))).pipe(v(e=>O(Ye,I(e)).pipe(_(()=>e.disconnect()))),G(1));function tt(e){return wa.pipe(w(t=>t.observe(e)),v(t=>tn.pipe(b(({target:r})=>r===e),_(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function rn(e,t=16){return Ne(e).pipe(m(({y:r})=>{let o=ce(e),n=St(e);return r>=n.height-o.height-t}),K())}var lr={drawer:R("[data-md-toggle=drawer]"),search:R("[data-md-toggle=search]")};function on(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function ze(e){let t=lr[e];return h(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function Ta(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Sa(){return O(h(window,"compositionstart").pipe(m(()=>!0)),h(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function nn(){let e=h(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:on("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Ie();if(typeof o!="undefined")return!Ta(o,r)}return!0}),pe());return Sa().pipe(v(t=>t?S:e))}function ye(){return new URL(location.href)}function lt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function an(){return new g}function sn(){return location.hash.slice(1)}function cn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Oa(e){return O(h(window,"hashchange"),e).pipe(m(sn),Q(sn()),b(t=>t.length>0),G(1))}function pn(e){return Oa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function Pt(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function ln(){let e=matchMedia("print");return 
O(h(window,"beforeprint").pipe(m(()=>!0)),h(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():S))}function zr(e,t){return new j(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function je(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function fn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function un(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function dn(){return O(h(window,"scroll",{passive:!0}),h(window,"resize",{passive:!0})).pipe(m(un),Q(un()))}function hn(){return{width:innerWidth,height:innerHeight}}function bn(){return h(window,"resize",{passive:!0}).pipe(m(hn),Q(hn()))}function vn(){return z([dn(),bn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(ee("size")),n=z([o,r]).pipe(m(()=>Ve(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Ma(e){return h(e,"message",t=>t.data)}function La(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function gn(e,t=new Worker(e)){let r=Ma(t),o=La(t),n=new g;n.subscribe(o);let i=o.pipe(Z(),ie(!0));return n.pipe(Z(),Re(r.pipe(U(i))),pe())}var _a=R("#__config"),Ot=JSON.parse(_a.textContent);Ot.base=`${new URL(Ot.base,ye())}`;function xe(){return Ot}function B(e){return Ot.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?Ot.translations[e].replace("#",t.toString()):Ot.translations[e]}function Se(e,t=document){return R(`[data-md-component=${e}]`,t)}function ae(e,t=document){return P(`[data-md-component=${e}]`,t)}function Aa(e){let t=R(".md-typeset > :first-child",e);return h(t,"click",{once:!0}).pipe(m(()=>R(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function yn(e){if(!B("announce.dismiss")||!e.childElementCount)return S;if(!e.hidden){let t=R(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),Aa(e).pipe(w(r=>t.next(r)),_(()=>t.complete()),m(r=>$({ref:e},r)))})}function Ca(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Ca(e,t).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))}function Rt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function En(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function wn(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function Tn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}var On=Lt(qr());function Qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,(0,On.default)(c))," "],[]).slice(0,-1),i=xe(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=xe();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Mn(e){let t=e[0].score,r=[...e],o=xe(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreQr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>Qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Ln(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Kr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function _n(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function $a(e){var o;let t=xe(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function An(e,t){var o;let r=xe();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map($a)))}var Pa=0;function Ra(e){let t=z([et(e),$t(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Xo(e)).pipe(ne(Ne),pt(1),He(t),m(()=>Zo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function Ia(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Pa++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(Z(),ie(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,kr)),K(),v(c=>c?r:S),w(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>$t(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),re(s,o),m(([c,l,{size:f}])=>{let 
u=e.getBoundingClientRect(),d=u.width/2;if(l.role==="tooltip")return{x:d,y:8+u.height};if(u.y>=f.height/2){let{height:y}=ce(l);return{x:d,y:-16-y}}else return{x:d,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),re(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(R(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),ve(me),re(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),Ra(e).pipe(w(c=>i.next(c)),_(()=>i.complete()),m(c=>$({ref:e},c)))})}function mt(e,{viewport$:t},r=document.body){return Ia(e,{content$:new j(o=>{let n=e.title,i=En(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function ja(e,t){let r=C(()=>z([en(e),Ne(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Cn(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),O(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),h(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),h(n,"mousedown").pipe(U(a),re(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Ie())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),ja(e,t).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function Fa(e){return e.tagName==="CODE"?P(".c, .c1, .cm",e):[e]}function Wa(e){let t=[];for(let r of Fa(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function 
kn(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Wa(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,wn(p,i)),s.replaceWith(a.get(p)))}return a.size===0?S:C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=[];for(let[l,f]of a)c.push([R(".md-typeset",f),R(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?kn(f,u):kn(u,f)}),O(...[...a].map(([,l])=>Cn(l,t,{target$:r}))).pipe(_(()=>s.complete()),pe())})}function Hn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return Hn(t)}}function $n(e,t){return C(()=>{let r=Hn(e);return typeof r!="undefined"?fr(r,e,t):S})}var Pn=Lt(Br());var Ua=0;function Rn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return Rn(t)}}function Da(e){return ge(e).pipe(m(({width:t})=>({scrollable:St(e).width>t})),ee("scrollable"))}function In(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(jr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Pn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Ua++}`;let l=Tn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(mt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=Rn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:S)))}}return P(":scope > span[id]",e).length&&e.classList.add("md-code__content"),Da(e).pipe(w(c=>n.next(c)),_(()=>n.complete()),m(c=>$({ref:e},c)),Re(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Va(e,{target$:t,print$:r}){let o=!0;return O(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),w(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function jn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Va(e,t).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}var Fn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel p,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel p{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g 
#flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Gr,za=0;function qa(){return typeof mermaid=="undefined"||mermaid instanceof Element?Tt("https://unpkg.com/mermaid@11/dist/mermaid.min.js"):I(void 0)}function Wn(e){return e.classList.remove("mermaid"),Gr||(Gr=qa().pipe(w(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Fn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Gr.subscribe(()=>so(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${za++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Gr.pipe(m(()=>({ref:e})))}var Un=x("table");function Dn(e){return e.replaceWith(Un),Un.replaceWith(_n(e)),I({ref:e})}function Qa(e){let t=e.find(r=>r.checked)||e[0];return O(...e.map(r=>h(r,"change").pipe(m(()=>R(`label[for="${r.id}"]`))))).pipe(Q(R(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Vn(e,{viewport$:t,target$:r}){let o=R(".tabbed-labels",e),n=P(":scope > input",e),i=Kr("prev");e.append(i);let a=Kr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(Z(),ie(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ve(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let d=pr(o);(f.xd.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([Ne(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=St(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),O(h(i,"click").pipe(m(()=>-1)),h(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=R(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),h(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),w(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),re(t)).subscribe(([{active:c},{offset:l}])=>{let 
f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let y of P("[data-tabs]"))for(let M of P(":scope > input",y)){let X=R(`label[for="${M.id}"]`);if(X!==c&&X.innerText.trim()===f){X.setAttribute("data-md-switching",""),M.click();break}}window.scrollTo({top:e.offsetTop-u});let d=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...d])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of P("audio, video",e))c.pause()}),Qa(n).pipe(w(c=>s.next(c)),_(()=>s.complete()),m(c=>$({ref:e},c)))}).pipe(Ke(se))}function Nn(e,{viewport$:t,target$:r,print$:o}){return O(...P(".annotate:not(.highlight)",e).map(n=>$n(n,{target$:r,print$:o})),...P("pre:not(.mermaid) > code",e).map(n=>In(n,{target$:r,print$:o})),...P("pre.mermaid",e).map(n=>Wn(n)),...P("table:not([class])",e).map(n=>Dn(n)),...P("details",e).map(n=>jn(n,{target$:r,print$:o})),...P("[data-tabs]",e).map(n=>Vn(n,{viewport$:t,target$:r})),...P("[title]",e).filter(()=>B("content.tooltips")).map(n=>mt(n,{viewport$:t})))}function Ka(e,{alert$:t}){return t.pipe(v(r=>O(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function zn(e,t){let r=R(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),Ka(e,t).pipe(w(n=>o.next(n)),_(()=>o.complete()),m(n=>$({ref:e},n)))})}var Ya=0;function Ba(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?Ne(o):I({x:0,y:0}),i=O(et(t),$t(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ve(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function qn(e){let t=e.title;if(!t.length)return S;let r=`__tooltip_${Ya++}`,o=Rt(r,"inline"),n=R(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),O(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Ba(o,e).pipe(w(a=>i.next(a)),_(()=>i.complete()),m(a=>$({ref:e},a)))}).pipe(Ke(se))}function Ga({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Be(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=ze("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Qn(e,t){return C(()=>z([ge(e),Ga(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function Kn(e,{header$:t,main$:r}){return C(()=>{let o=new 
g,n=o.pipe(Z(),ie(!0));o.pipe(ee("active"),He(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue(P("[title]",e)).pipe(b(()=>B("content.tooltips")),ne(a=>qn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>$({ref:e},a)),Re(i.pipe(U(n))))})}function Ja(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),ee("active"))}function Yn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?S:Ja(o,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))})}function Bn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),ee("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Xa(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(ne(o=>h(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Gn(e){let t=P("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=Pt("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;sa.key==="Enter"),re(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(ve(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Xa(t).pipe(U(n.pipe(Ce(1))),ct(),w(a=>i.next(a)),_(()=>i.complete()),m(a=>$({ref:e},a)))})}function Jn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(w(o=>r.next({value:o})),_(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Jr=Lt(Br());function Za(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Xn({alert$:e}){Jr.default.isSupported()&&new j(t=>{new Jr.default("[data-clipboard-target], 
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Za(R(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(w(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Zn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function es(e,t){let r=new Map;for(let o of P("url",e)){let n=R("loc",o),i=[Zn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of P("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Zn(new URL(s),t))}}return r}function ur(e){return fn(new URL("sitemap.xml",e)).pipe(m(t=>es(t,new URL(e))),de(()=>I(new Map)))}function ts(e,t){if(!(e.target instanceof Element))return S;let r=e.target.closest("a");if(r===null)return S;if(r.target||e.metaKey||e.ctrlKey)return S;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):S}function ei(e){let t=new Map;for(let r of P(":scope > *",e.head))t.set(r.outerHTML,r);return t}function ti(e){for(let t of P("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function rs(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=ei(document);for(let[o,n]of ei(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return Ue(P("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new j(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),S}),Z(),ie(document))}function ri({location$:e,viewport$:t,progress$:r}){let o=xe();if(location.protocol==="file:")return S;let n=ur(o.base);I(document).subscribe(ti);let i=h(document.body,"click").pipe(He(n),v(([p,c])=>ts(p,c)),pe()),a=h(window,"popstate").pipe(m(ye),pe());i.pipe(re(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),O(i,a).subscribe(e);let s=e.pipe(ee("pathname"),v(p=>mn(p,{progress$:r}).pipe(de(()=>(lt(p,!0),S)))),v(ti),v(rs),pe());return O(s.pipe(re(e,(p,c)=>c)),s.pipe(v(()=>e),ee("pathname"),v(()=>e),ee("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),w(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",cn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),h(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(ee("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var oi=Lt(qr());function ni(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,oi.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function jt(e){return e.type===1}function dr(e){return 
e.type===3}function ii(e,t){let r=gn(e);return O(I(location.protocol!=="file:"),ze("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ai({document$:e}){let t=xe(),r=je(new URL("../versions.json",t.base)).pipe(de(()=>S)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>h(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),re(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?S:(i.preventDefault(),I(p))}}return S}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=ye().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>lt(n,!0)),z([r,o]).subscribe(([n,i])=>{R(".md-header__topic").appendChild(An(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function is(e,{worker$:t}){let{searchParams:r}=ye();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),ze("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=ye();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=O(t.pipe(Ae(jt)),h(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function si(e,{worker$:t}){let r=new g,o=r.pipe(Z(),ie(!0));z([t.pipe(Ae(jt)),r],(i,a)=>a).pipe(ee("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(ee("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),h(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=R("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),is(e,{worker$:t}).pipe(w(i=>r.next(i)),_(()=>r.complete()),m(i=>$({ref:e},i)),G(1))}function ci(e,{worker$:t,query$:r}){let o=new g,n=rn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=R(":scope > :first-child",e),s=R(":scope > :last-child",e);ze("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(re(r),Ur(t.pipe(Ae(jt)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(w(()=>s.innerHTML=""),v(({items:l})=>O(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Be(4),Vr(n),v(([f])=>f)))),m(Mn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(ne(l=>{let f=fe("details",l);return typeof f=="undefined"?S:h(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(w(l=>o.next(l)),_(()=>o.complete()),m(l=>$({ref:e},l)))}function as(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=ye();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function pi(e,t){let r=new g,o=r.pipe(Z(),ie(!0));return 
r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),as(e,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))}function li(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(h(n,"keydown"),h(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(He(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(w(s=>o.next(s)),_(()=>o.complete()),m(()=>({ref:e})))}function mi(e,{index$:t,keyboard$:r}){let o=xe();try{let n=ii(o.search,t),i=Se("search-query",e),a=Se("search-result",e);h(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Ie();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of P(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...P(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=si(i,{worker$:n});return O(s,ci(a,{worker$:n,query$:s})).pipe(Re(...ae("search-share",e).map(p=>pi(p,{query$:s})),...ae("search-suggest",e).map(p=>li(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function fi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(ye()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ni(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function ss(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Xr(e,o){var n=o,{header$:t}=n,r=ao(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=Ve(i);return C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=s.pipe(Le(0,me));return c.pipe(re(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2})}}}),ue(P("label[tabindex]",e)).pipe(ne(l=>h(l,"click").pipe(ve(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),ss(e,r).pipe(w(l=>s.next(l)),_(()=>s.complete()),m(l=>$({ref:e},l)))})}function ui(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(je(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),De({})),je(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return je(r).pipe(m(o=>({repositories:o.public_repos})),De({}))}}function di(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(je(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),De({})),je(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}function hi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return ui(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return di(r,o)}return S}var cs;function ps(e){return cs||(cs=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return hi(e.href).pipe(w(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function bi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Ln(o)),t.classList.add("md-source__repository--active")}),ps(e).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),ee("hidden"))}function vi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ls(e,t)).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function ms(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(ee("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(ee("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),He(i),v(([p,c])=>t.pipe(Fr(([l,f],{offset:{y:u},size:d})=>{let y=u+d.height>=Math.floor(s.height);for(;f.length;){let[,M]=f[0];if(M-c=u&&!y)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(Z(),ie(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let 
s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),He(o.pipe(ve(se))),re(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(U(a),ee("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),ct({delay:250}),re(i)).subscribe(([,{prev:s}])=>{let p=ye(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ms(e,{viewport$:t,header$:r}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function fs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ie(!0),ct({delay:250}),m(a=>({hidden:a})))}function yi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),ee("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),h(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),fs(e,{viewport$:t,main$:o,target$:n}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),ne(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),_(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),ne(r=>mt(r,{viewport$:t}))).subscribe()}function Ei({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),w(r=>{r.indeterminate=!0,r.checked=!1}),ne(r=>h(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),re(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function us(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function wi({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),w(t=>t.removeAttribute("data-md-scrollfix")),b(us),ne(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Ti({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),re(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let 
r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ds(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",Zr.base)}`).pipe(m(()=>__index),G(1)):je(new URL("search/search_index.json",Zr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Bo(),Wt=an(),Mt=pn(Wt),eo=nn(),Oe=vn(),hr=Pt("(min-width: 960px)"),Oi=Pt("(min-width: 1220px)"),Mi=ln(),Zr=xe(),Li=document.forms.namedItem("search")?ds():Ye,to=new g;Xn({alert$:to});var ro=new g;B("navigation.instant")&&ri({location$:Wt,viewport$:Oe,progress$:ro}).subscribe(ot);var Si;((Si=Zr.version)==null?void 0:Si.provider)==="mike"&&ai({document$:ot});O(Wt,Mt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});eo.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&<(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&<(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});Ei({document$:ot,tablet$:hr});wi({document$:ot});Ti({viewport$:Oe,tablet$:hr});var rt=Qn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Bn(e,{viewport$:Oe,header$:rt})),G(1)),hs=O(...ae("consent").map(e=>xn(e,{target$:Mt})),...ae("dialog").map(e=>zn(e,{alert$:to})),...ae("header").map(e=>Kn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Gn(e)),...ae("progress").map(e=>Jn(e,{progress$:ro})),...ae("search").map(e=>mi(e,{index$:Li,keyboard$:eo})),...ae("source").map(e=>bi(e))),bs=C(()=>O(...ae("announce").map(e=>yn(e)),...ae("content").map(e=>Nn(e,{viewport$:Oe,target$:Mt,print$:Mi})),...ae("content").map(e=>B("search.highlight")?fi(e,{index$:Li,location$:Wt}):S),...ae("header-title").map(e=>Yn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Oi,()=>Xr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Xr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>vi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Mt})),...ae("top").map(e=>yi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Mt})))),_i=ot.pipe(v(()=>bs),Re(hs),G(1));_i.subscribe();window.document$=ot;window.location$=Wt;window.target$=Mt;window.keyboard$=eo;window.viewport$=Oe;window.tablet$=hr;window.screen$=Oi;window.print$=Mi;window.alert$=to;window.progress$=ro;window.component$=_i;})(); +//# sourceMappingURL=bundle.56dfad97.min.js.map + diff --git a/assets/javascripts/bundle.56dfad97.min.js.map b/assets/javascripts/bundle.56dfad97.min.js.map new file mode 100644 index 00000000..eb83bdb3 --- /dev/null +++ b/assets/javascripts/bundle.56dfad97.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", "node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/tslib/tslib.es6.mjs", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", 
"node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", 
"node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", 
"src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", 
"src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? 
html + str.substring(lastIndex, index)\n : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/******************************************************************************\nCopyright (c) Microsoft Corporation.\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n***************************************************************************** */\n/* global Reflect, Promise, SuppressedError, Symbol, Iterator */\n\nvar extendStatics = function(d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\n return extendStatics(d, b);\n};\n\nexport function __extends(d, b) {\n if (typeof b !== \"function\" && b !== null)\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\n}\n\nexport var __assign = function() {\n __assign = Object.assign || function __assign(t) {\n for (var s, i = 1, n = arguments.length; i < n; i++) {\n s = arguments[i];\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\n }\n return t;\n }\n return __assign.apply(this, arguments);\n}\n\nexport function __rest(s, e) {\n var t = {};\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\n t[p] = s[p];\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\n t[p[i]] = s[p[i]];\n }\n return t;\n}\n\nexport function __decorate(decorators, target, key, desc) {\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\n return c > 3 && r && Object.defineProperty(target, key, r), r;\n}\n\nexport function __param(paramIndex, decorator) {\n return function (target, key) { decorator(target, key, paramIndex); }\n}\n\nexport function __esDecorate(ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {\n function accept(f) { if (f !== void 0 && typeof f !== \"function\") throw new TypeError(\"Function expected\"); return f; }\n var kind = contextIn.kind, key = kind === \"getter\" ? \"get\" : kind === \"setter\" ? \"set\" : \"value\";\n var target = !descriptorIn && ctor ? contextIn[\"static\"] ? ctor : ctor.prototype : null;\n var descriptor = descriptorIn || (target ? Object.getOwnPropertyDescriptor(target, contextIn.name) : {});\n var _, done = false;\n for (var i = decorators.length - 1; i >= 0; i--) {\n var context = {};\n for (var p in contextIn) context[p] = p === \"access\" ? {} : contextIn[p];\n for (var p in contextIn.access) context.access[p] = contextIn.access[p];\n context.addInitializer = function (f) { if (done) throw new TypeError(\"Cannot add initializers after decoration has completed\"); extraInitializers.push(accept(f || null)); };\n var result = (0, decorators[i])(kind === \"accessor\" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);\n if (kind === \"accessor\") {\n if (result === void 0) continue;\n if (result === null || typeof result !== \"object\") throw new TypeError(\"Object expected\");\n if (_ = accept(result.get)) descriptor.get = _;\n if (_ = accept(result.set)) descriptor.set = _;\n if (_ = accept(result.init)) initializers.unshift(_);\n }\n else if (_ = accept(result)) {\n if (kind === \"field\") initializers.unshift(_);\n else descriptor[key] = _;\n }\n }\n if (target) Object.defineProperty(target, contextIn.name, descriptor);\n done = true;\n};\n\nexport function __runInitializers(thisArg, initializers, value) {\n var useValue = arguments.length > 2;\n for (var i = 0; i < initializers.length; i++) {\n value = useValue ? initializers[i].call(thisArg, value) : initializers[i].call(thisArg);\n }\n return useValue ? value : void 0;\n};\n\nexport function __propKey(x) {\n return typeof x === \"symbol\" ? 
x : \"\".concat(x);\n};\n\nexport function __setFunctionName(f, name, prefix) {\n if (typeof name === \"symbol\") name = name.description ? \"[\".concat(name.description, \"]\") : \"\";\n return Object.defineProperty(f, \"name\", { configurable: true, value: prefix ? \"\".concat(prefix, \" \", name) : name });\n};\n\nexport function __metadata(metadataKey, metadataValue) {\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\n}\n\nexport function __awaiter(thisArg, _arguments, P, generator) {\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\n return new (P || (P = Promise))(function (resolve, reject) {\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\n step((generator = generator.apply(thisArg, _arguments || [])).next());\n });\n}\n\nexport function __generator(thisArg, body) {\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === \"function\" ? Iterator : Object).prototype);\n return g.next = verb(0), g[\"throw\"] = verb(1), g[\"return\"] = verb(2), typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\n function verb(n) { return function (v) { return step([n, v]); }; }\n function step(op) {\n if (f) throw new TypeError(\"Generator is already executing.\");\n while (g && (g = 0, op[0] && (_ = 0)), _) try {\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\n if (y = 0, t) op = [op[0] & 2, t.value];\n switch (op[0]) {\n case 0: case 1: t = op; break;\n case 4: _.label++; return { value: op[1], done: false };\n case 5: _.label++; y = op[1]; op = [0]; continue;\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\n default:\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\n if (t[2]) _.ops.pop();\n _.trys.pop(); continue;\n }\n op = body.call(thisArg, _);\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\n }\n}\n\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n var desc = Object.getOwnPropertyDescriptor(m, k);\n if (!desc || (\"get\" in desc ? 
!m.__esModule : desc.writable || desc.configurable)) {\n desc = { enumerable: true, get: function() { return m[k]; } };\n }\n Object.defineProperty(o, k2, desc);\n}) : (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n o[k2] = m[k];\n});\n\nexport function __exportStar(m, o) {\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\n}\n\nexport function __values(o) {\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\n if (m) return m.call(o);\n if (o && typeof o.length === \"number\") return {\n next: function () {\n if (o && i >= o.length) o = void 0;\n return { value: o && o[i++], done: !o };\n }\n };\n throw new TypeError(s ? \"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\n}\n\nexport function __read(o, n) {\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\n if (!m) return o;\n var i = m.call(o), r, ar = [], e;\n try {\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\n }\n catch (error) { e = { error: error }; }\n finally {\n try {\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\n }\n finally { if (e) throw e.error; }\n }\n return ar;\n}\n\n/** @deprecated */\nexport function __spread() {\n for (var ar = [], i = 0; i < arguments.length; i++)\n ar = ar.concat(__read(arguments[i]));\n return ar;\n}\n\n/** @deprecated */\nexport function __spreadArrays() {\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\n r[k] = a[j];\n return r;\n}\n\nexport function __spreadArray(to, from, pack) {\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\n if (ar || !(i in from)) {\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\n ar[i] = from[i];\n }\n }\n return to.concat(ar || Array.prototype.slice.call(from));\n}\n\nexport function __await(v) {\n return this instanceof __await ? (this.v = v, this) : new __await(v);\n}\n\nexport function __asyncGenerator(thisArg, _arguments, generator) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\n return i = Object.create((typeof AsyncIterator === \"function\" ? AsyncIterator : Object).prototype), verb(\"next\"), verb(\"throw\"), verb(\"return\", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;\n function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }\n function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\n function fulfill(value) { resume(\"next\", value); }\n function reject(value) { resume(\"throw\", value); }\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\n}\n\nexport function __asyncDelegator(o) {\n var i, p;\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? 
{ value: __await(o[n](v)), done: false } : f ? f(v) : v; } : f; }\n}\n\nexport function __asyncValues(o) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var m = o[Symbol.asyncIterator], i;\n return m ? m.call(o) : (o = typeof __values === \"function\" ? __values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\n}\n\nexport function __makeTemplateObject(cooked, raw) {\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\n return cooked;\n};\n\nvar __setModuleDefault = Object.create ? (function(o, v) {\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\n}) : function(o, v) {\n o[\"default\"] = v;\n};\n\nexport function __importStar(mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\n __setModuleDefault(result, mod);\n return result;\n}\n\nexport function __importDefault(mod) {\n return (mod && mod.__esModule) ? mod : { default: mod };\n}\n\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\n}\n\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\n}\n\nexport function __classPrivateFieldIn(state, receiver) {\n if (receiver === null || (typeof receiver !== \"object\" && typeof receiver !== \"function\")) throw new TypeError(\"Cannot use 'in' operator on non-object\");\n return typeof state === \"function\" ? 
receiver === state : state.has(receiver);\n}\n\nexport function __addDisposableResource(env, value, async) {\n if (value !== null && value !== void 0) {\n if (typeof value !== \"object\" && typeof value !== \"function\") throw new TypeError(\"Object expected.\");\n var dispose, inner;\n if (async) {\n if (!Symbol.asyncDispose) throw new TypeError(\"Symbol.asyncDispose is not defined.\");\n dispose = value[Symbol.asyncDispose];\n }\n if (dispose === void 0) {\n if (!Symbol.dispose) throw new TypeError(\"Symbol.dispose is not defined.\");\n dispose = value[Symbol.dispose];\n if (async) inner = dispose;\n }\n if (typeof dispose !== \"function\") throw new TypeError(\"Object not disposable.\");\n if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };\n env.stack.push({ value: value, dispose: dispose, async: async });\n }\n else if (async) {\n env.stack.push({ async: true });\n }\n return value;\n}\n\nvar _SuppressedError = typeof SuppressedError === \"function\" ? SuppressedError : function (error, suppressed, message) {\n var e = new Error(message);\n return e.name = \"SuppressedError\", e.error = error, e.suppressed = suppressed, e;\n};\n\nexport function __disposeResources(env) {\n function fail(e) {\n env.error = env.hasError ? new _SuppressedError(e, env.error, \"An error was suppressed during disposal.\") : e;\n env.hasError = true;\n }\n var r, s = 0;\n function next() {\n while (r = env.stack.pop()) {\n try {\n if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);\n if (r.dispose) {\n var result = r.dispose.call(r.value);\n if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });\n }\n else s |= 1;\n }\n catch (e) {\n fail(e);\n }\n }\n if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();\n if (env.hasError) throw env.error;\n }\n return next();\n}\n\nexport default {\n __extends,\n __assign,\n __rest,\n __decorate,\n __param,\n __metadata,\n __awaiter,\n __generator,\n __createBinding,\n __exportStar,\n __values,\n __read,\n __spread,\n __spreadArrays,\n __spreadArray,\n __await,\n __asyncGenerator,\n __asyncDelegator,\n __asyncValues,\n __makeTemplateObject,\n __importStar,\n __importDefault,\n __classPrivateFieldGet,\n __classPrivateFieldSet,\n __classPrivateFieldIn,\n __addDisposableResource,\n __disposeResources,\n};\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
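The `GlobalConfig` hooks described above can be exercised directly from application code. A minimal sketch, assuming nothing beyond the public `config` export:

```ts
import { config } from 'rxjs';

// Route otherwise-unhandled errors to a central logger instead of having
// them rethrown asynchronously on a new call stack.
config.onUnhandledError = (err) => console.warn('unhandled RxJS error:', err);

// Observe next/error/complete notifications that arrive after a subscriber
// has already stopped (by default these are no-ops).
config.onStoppedNotification = (notification, subscriber) => {
  console.warn('late notification of kind', notification.kind, 'closed:', subscriber.closed);
};
```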
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
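How a `Subscriber` behaves once it is "stopped" (after `error` or `complete`) is a common point of confusion. The sketch below is illustrative, not part of the source above, and uses only the public `Observable` API:

```ts
import { Observable } from 'rxjs';

const source$ = new Observable<number>((subscriber) => {
  subscriber.next(1);
  subscriber.complete();
  // The subscriber is now stopped: this next() is ignored (or routed to
  // config.onStoppedNotification if one is configured).
  subscriber.next(2);
});

// A partial observer: omitted callbacks are no-ops, except a missing
// `error` handler, in which case errors are rethrown asynchronously.
source$.subscribe({ next: (value) => console.log('value', value) });
// logs: 'value 1'
```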
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
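The standalone `pipe` helper documented above composes unary functions left to right, which makes it useful for building reusable operator chains. A short illustrative sketch (the name `evensDoubled` is invented for the example):

```ts
import { of, pipe, filter, map } from 'rxjs';

// pipe() composes the operator functions into a single reusable
// transformation that can be applied to any Observable<number>.
const evensDoubled = pipe(
  filter((n: number) => n % 2 === 0),
  map((n: number) => n * 2)
);

of(1, 2, 3, 4).pipe(evensDoubled).subscribe(console.log); // logs 4, then 8
```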
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
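Since `toPromise` is deprecated in favor of `firstValueFrom` and `lastValueFrom`, a brief sketch of the recommended replacements may help (illustrative only, using the public `rxjs` exports):

```ts
import { of, firstValueFrom, lastValueFrom } from 'rxjs';

(async () => {
  // firstValueFrom resolves with the first emission; lastValueFrom resolves
  // with the final emission once the source completes.
  console.log(await firstValueFrom(of(1, 2, 3))); // 1
  console.log(await lastValueFrom(of(1, 2, 3)));  // 3
})();
```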
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
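The `operate` and `createOperatorSubscriber` helpers above are internal; user code is instead steered toward writing operators by returning a `new Observable` directly, as the `lift` deprecation note earlier recommends. A hedged sketch of that pattern, with a hypothetical `double` operator:

```ts
import { Observable, OperatorFunction } from 'rxjs';

// A custom operator written without lift(): wrap the source in a new
// Observable and forward next/error/complete to the outer subscriber.
function double(): OperatorFunction<number, number> {
  return (source) =>
    new Observable<number>((subscriber) =>
      source.subscribe({
        next: (value) => subscriber.next(value * 2),
        error: (err) => subscriber.error(err),
        complete: () => subscriber.complete(),
      })
    );
}

// usage: of(1, 2, 3).pipe(double()) emits 2, 4, 6
```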
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
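The multicasting behavior of `Subject` described above can be shown in a few lines. This is an illustrative sketch, not part of the library source:

```ts
import { Subject } from 'rxjs';

// Every value pushed with next() reaches all currently registered observers.
const subject = new Subject<string>();
subject.subscribe((v) => console.log('A got', v));
subject.subscribe((v) => console.log('B got', v));

subject.next('hello'); // both A and B log 'hello'
subject.complete();    // both observers are completed
```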
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
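The difference between `BehaviorSubject` and `ReplaySubject` described above is quickest to see side by side. An illustrative sketch (the variable names are invented for the example):

```ts
import { BehaviorSubject, ReplaySubject } from 'rxjs';

// BehaviorSubject comes "primed" with an initial value and emits its
// current value to every new subscriber.
const state$ = new BehaviorSubject(0);
state$.next(1);
state$.subscribe((v) => console.log('behavior:', v)); // behavior: 1

// new ReplaySubject(2) buffers the last two values and replays them
// synchronously to each new subscriber.
const recent$ = new ReplaySubject<number>(2);
recent$.next(1);
recent$.next(2);
recent$.next(3);
recent$.subscribe((v) => console.log('replay:', v)); // replay: 2, replay: 3
```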
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
    \n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an