Merge pull request #44 from shrutimantri/doc_correction_1
fix(docs + code): corrections to docs for plugin-debezium
anna-geller authored Jan 5, 2024
2 parents 3b146f4 + f142196 commit 49e9bb1
Showing 10 changed files with 71 additions and 68 deletions.
@@ -22,16 +22,16 @@
@Getter
@NoArgsConstructor
@Schema(
title = "Wait for change data capture event on MySQL server"
title = "Wait for change data capture event on MySQL server."
)
@Plugin(
examples = {
@Example(
code = {
"snapshotMode: NEVER",
"hostname: 127.0.0.1",
"port: 63306",
"username: root",
"port: \"3306\"",
"username: mysql_user",
"password: mysql_passwd",
"maxRecords: 100",
}
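
The corrected example above lists only the task properties. For readers wiring this up, a minimal sketch of how those properties might sit in a full Kestra flow follows; the flow id, namespace, and fully qualified task type are assumptions and are not part of this diff.

id: debezium_mysql_capture        # hypothetical flow id
namespace: company.team           # hypothetical namespace

tasks:
  - id: capture
    type: io.kestra.plugin.debezium.mysql.Capture   # assumed task type for this plugin
    snapshotMode: NEVER
    hostname: 127.0.0.1
    port: "3306"
    username: mysql_user
    password: mysql_passwd
    maxRecords: 100
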
@@ -11,20 +11,20 @@ public interface MysqlInterface {
@Schema(
title = "Specifies the criteria for running a snapshot when the connector starts.",
description = " Possible settings are:\n" +
"- `INITIAL`: the connector runs a snapshot only when no offsets have been recorded for the logical server name.\n" +
"- `INITIAL_ONLY`: the connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.\n" +
"- `WHEN_NEEDED`: the connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.\n" +
"- `NEVER`: - the connector never uses snapshots. Upon first startup with a logical server name, the connector reads from the beginning of the binlog. Configure this behavior with care. It is valid only when the binlog is guaranteed to contain the entire history of the database.\n" +
"- `SCHEMA_ONLY`: the connector runs a snapshot of the schemas and not the data. This setting is useful when you do not need the topics to contain a consistent snapshot of the data but need them to have only the changes since the connector was started.\n" +
"- `SCHEMA_ONLY_RECOVERY`: this is a recovery setting for a connector that has already been capturing changes. When you restart the connector, this setting enables recovery of a corrupted or lost database history topic. You might set it periodically to \"clean up\" a database history topic that has been growing unexpectedly. Database history topics require infinite retention."
"- `INITIAL`: The connector runs a snapshot only when no offsets have been recorded for the logical server name.\n" +
"- `INITIAL_ONLY`: The connector runs a snapshot only when no offsets have been recorded for the logical server name and then stops; i.e. it will not read change events from the binlog.\n" +
"- `WHEN_NEEDED`: The connector runs a snapshot upon startup whenever it deems it necessary. That is, when no offsets are available, or when a previously recorded offset specifies a binlog location or GTID that is not available in the server.\n" +
"- `NEVER`: The connector never uses snapshots. Upon first startup with a logical server name, the connector reads from the beginning of the binlog. Configure this behavior with care. It is valid only when the binlog is guaranteed to contain the entire history of the database.\n" +
"- `SCHEMA_ONLY`: The connector runs a snapshot of the schemas and not the data. This setting is useful when you do not need the topics to contain a consistent snapshot of the data but need them to have only the changes since the connector was started.\n" +
"- `SCHEMA_ONLY_RECOVERY`: This is a recovery setting for a connector that has already been capturing changes. When you restart the connector, this setting enables recovery of a corrupted or lost database history topic. You might set it periodically to \"clean up\" a database history topic that has been growing unexpectedly. Database history topics require infinite retention."
)
@PluginProperty(dynamic = false)
@NotNull
SnapshotMode getSnapshotMode();

@Schema(
title = "A numeric ID of this database client.",
description = "which must be unique across all currently-running database processes in the MySQL cluster. " +
description = "This must be unique across all currently-running database processes in the MySQL cluster. " +
"This connector joins the MySQL database cluster as another server (with this unique ID) so it can read " +
"the binlog. By default, a random number between 5400 and 6400 is generated, though the recommendation " +
"is to explicitly set a value."
@@ -38,6 +38,7 @@ public enum SnapshotMode {
INITIAL_ONLY,
WHEN_NEEDED,
NEVER,
SCHEMA_ONLY
SCHEMA_ONLY,
SCHEMA_ONLY_RECOVERY
}
}
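
The hunk above also adds SCHEMA_ONLY_RECOVERY to the SnapshotMode enum, so the recovery mode already described in the schema text becomes selectable. A sketch of the two documented properties follows; the exact YAML keys (snapshotMode, serverId) are assumed from the getter descriptions and are not spelled out in this diff.

snapshotMode: SCHEMA_ONLY_RECOVERY   # INITIAL | INITIAL_ONLY | WHEN_NEEDED | NEVER | SCHEMA_ONLY | SCHEMA_ONLY_RECOVERY
serverId: 5401                       # assumed key; must be unique across the MySQL cluster, random 5400-6400 if omitted
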
@@ -29,16 +29,16 @@
@Getter
@NoArgsConstructor
@Schema(
title = "Wait for change data capture event on MySQL server and create new execution"
title = "Wait for change data capture event on MySQL server and create new execution."
)
@Plugin(
examples = {
@Example(
code = {
"snapshotMode: NEVER",
"hostname: 127.0.0.1",
"port: 63306",
"username: root",
"port: \"3306\"",
"username: mysql_user",
"password: mysql_passwd",
"maxRecords: 100",
}
@@ -19,15 +19,15 @@
@Getter
@NoArgsConstructor
@Schema(
title = "Wait for change data capture event on PostgresSQL server"
title = "Wait for change data capture event on PostgreSQL server."
)
@Plugin(
examples = {
@Example(
code = {
"hostname: 127.0.0.1",
"port: 5432",
"username: posgres",
"port: \"5432\"",
"username: psql_user",
"password: psql_passwd",
"maxRecords: 100",
"database: my_database",
@@ -27,7 +27,7 @@ public interface PostgresInterface {
PluginName getPluginName();

@Schema(
title = "The name of the PostgreSQL publication created for streaming changes when using `pgoutput`.",
title = "The name of the PostgreSQL publication created for streaming changes when using `PGOUTPUT`.",
description = "This publication is created at start-up if it does not already exist and it includes all tables. " +
"Debezium then applies its own include/exclude list filtering, if configured, to limit the publication to " +
"change events for the specific tables of interest. The connector user must have superuser permissions to " +
@@ -55,29 +55,29 @@ public interface PostgresInterface {
"- `DISABLE` uses an unencrypted connection.\n" +
"- `REQUIRE` uses a secure (encrypted) connection, and fails if one cannot be established.\n" +
"- `VERIFY_CA` behaves like require but also verifies the server TLS certificate against the configured Certificate Authority (CA) certificates, or fails if no valid matching CA certificates are found.\n" +
"- `VERIFY_FULL` behaves like verify-ca but also verifies that the server certificate matches the host to which the connector is trying to connect. \n\n" +
"- `VERIFY_FULL` behaves like verify-ca but also verifies that the server certificate matches the host to which the connector is trying to connect.\n\n" +
"See the [PostgreSQL documentation](https://www.postgresql.org/docs/current/static/libpq-connect.html) for more information."
)
@PluginProperty(dynamic = false)
SslMode getSslMode();

@Schema(
title = "The root certificate(s) against which the server is validated.",
description = "Must be a PEM encoded certificate"
description = "Must be a PEM encoded certificate."
)
@PluginProperty(dynamic = true)
String getSslRootCert();

@Schema(
title = "The SSL certificate for the client.",
description = "Must be a PEM encoded certificate"
description = "Must be a PEM encoded certificate."
)
@PluginProperty(dynamic = true)
String getSslCert();

@Schema(
title = "The SSL private key of the client.",
description = "Must be a PEM encoded key"
description = "Must be a PEM encoded key."
)
@PluginProperty(dynamic = true)
String getSslKey();
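
The SSL-related properties above are easier to read with a concrete shape in mind. A sketch follows; the YAML keys are assumed from the getter names, the certificate bodies are placeholders, and only the sslMode values listed in this hunk are shown.

sslMode: VERIFY_FULL       # DISABLE | REQUIRE | VERIFY_CA | VERIFY_FULL (see hunk above)
sslRootCert: |             # PEM encoded CA certificate(s) used to validate the server
  -----BEGIN CERTIFICATE-----
  ...
  -----END CERTIFICATE-----
sslCert: |                 # PEM encoded client certificate
  -----BEGIN CERTIFICATE-----
  ...
  -----END CERTIFICATE-----
sslKey: |                  # PEM encoded client private key
  -----BEGIN PRIVATE KEY-----
  ...
  -----END PRIVATE KEY-----
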
@@ -29,14 +29,14 @@
@Getter
@NoArgsConstructor
@Schema(
title = "Wait for change data capture event on PostgresSQL server and create new execution"
title = "Wait for change data capture event on PostgreSQL server and create new execution."
)
@Plugin(
examples = {
@Example(
code = {
"hostname: 127.0.0.1",
"port: 5432",
"port: \"5432\"",
"username: posgres",
"password: psql_passwd",
"maxRecords: 100",
@@ -19,15 +19,16 @@
@Getter
@NoArgsConstructor
@Schema(
title = "Wait for change data capture event on Microsoft SQL server"
title = "Wait for change data capture event on Microsoft SQL Server."
)
@Plugin(
examples = {
@Example(
code = {
"snapshotMode: INITIAL",
"hostname: 127.0.0.1",
"port: 1433",
"username: sa",
"port: \"1433\"",
"username: sqlserver_user",
"password: sqlserver_passwd",
"maxRecords: 100",
}
@@ -14,7 +14,7 @@

public interface SqlServerInterface {
@Schema(
title = "The name of the PostgreSQL database from which to stream the changes."
title = "The name of the Microsoft SQL Server database from which to stream the changes."
)
@PluginProperty(dynamic = true)
@NotNull
@@ -29,15 +29,16 @@
@Getter
@NoArgsConstructor
@Schema(
title = "Wait for change data capture event on Microsoft SQL server and create new execution"
title = "Wait for change data capture event on Microsoft SQL Server and create new execution."
)
@Plugin(
examples = {
@Example(
code = {
"snapshotMode: INITIAL",
"hostname: 127.0.0.1",
"port: 1433",
"username: sa",
"port: \"1433\"",
"username: sqlserver_user",
"password: sqlserver_passwd",
"database: deb",
"maxRecords: 100",