Resolve linting errors in the javadoc contents (airbytehq#7612)
* Javadoc cleanup
airbyte-jenny authored Nov 11, 2021
1 parent 4e4f58a commit fcb2ff4
Showing 15 changed files with 43 additions and 24 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -1,6 +1,7 @@
.gradle
.idea
*.iml
*.swp
build
out
.DS_Store
@@ -17,13 +17,14 @@
* to.
*
* Lifecycle:
* <ul>
* <li>1. Instantiate consumer.</li>
* <li>2. start() to initialize any resources that need to be created BEFORE the consumer consumes
* any messages.</li>
* <li>3. Consumes ALL records via {@link AirbyteMessageConsumer#accept(AirbyteMessage)}</li>
* <li>4. Always (on success or failure) finalize by calling
* {@link AirbyteMessageConsumer#close()}</li>
*
* </ul>
* We encourage implementing this interface using the {@link FailureTrackingAirbyteMessageConsumer}
* class.
*/
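The lifecycle above maps to a simple call sequence. A minimal driver sketch, assuming an existing consumer and a message iterator; the method and variable names are illustrative and not part of this commit:

static void runConsumer(final AirbyteMessageConsumer consumer,
                        final Iterator<AirbyteMessage> messages) throws Exception {
  try {
    consumer.start();                     // 2. set up resources before any message is consumed
    while (messages.hasNext()) {
      consumer.accept(messages.next());   // 3. consume ALL records
    }
  } finally {
    consumer.close();                     // 4. always finalize, on success or failure
  }
}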
@@ -28,10 +28,10 @@
/**
* This class handles reading and writing a debezium offset file. In many cases it is duplicating
* logic in debezium because that logic is not exposed in the public API. We mostly treat the
* contents of this state file like a black box. We know it is a Map<ByteBuffer, Bytebuffer>. We
* deserialize it to a Map<String, String> so that the state file can be human readable. If we ever
* discover that any of the contents of these offset files is not string serializable we will likely
* have to drop the human readability support and just base64 encode it.
* contents of this state file like a black box. We know it is a Map&lt;ByteBuffer, Bytebuffer&gt;.
* We deserialize it to a Map&lt;String, String&gt; so that the state file can be human readable. If
* we ever discover that any of the contents of these offset files is not string serializable we
* will likely have to drop the human readability support and just base64 encode it.
*/
public class AirbyteFileOffsetBackingStore {
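A rough sketch of the human-readable conversion the javadoc describes, turning the raw Map<ByteBuffer, ByteBuffer> into a Map<String, String>; the helper name is invented for illustration and is not this class's API:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

static Map<String, String> toHumanReadable(final Map<ByteBuffer, ByteBuffer> raw) {
  final Map<String, String> readable = new HashMap<>();
  for (final Map.Entry<ByteBuffer, ByteBuffer> entry : raw.entrySet()) {
    // Decode each key/value pair as UTF-8 so the persisted state stays human readable.
    readable.put(
        StandardCharsets.UTF_8.decode(entry.getKey().duplicate()).toString(),
        StandardCharsets.UTF_8.decode(entry.getValue().duplicate()).toString());
  }
  return readable;
}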

@@ -26,8 +26,8 @@
* The purpose of this class is : to , 1. Read the contents of the file {@link #path} which contains
* the schema history at the end of the sync so that it can be saved in state for future syncs.
* Check {@link #read()} 2. Write the saved content back to the file {@link #path} at the beginning
* of the sync so that debezium can function smoothly. Check {@link #persist(Optional<JsonNode>)}.
* To understand more about file, please refer {@link FilteredFileDatabaseHistory}
* of the sync so that debezium can function smoothly. Check persist(Optional&lt;JsonNode&gt;). To
* understand more about file, please refer {@link FilteredFileDatabaseHistory}
*/
public class AirbyteSchemaHistoryStorage {
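The two responsibilities in the javadoc reduce to a read-at-end-of-sync / write-at-start-of-sync round trip. A hedged sketch using plain file I/O and a String payload instead of the class's Optional<JsonNode> parameter; the method names are invented:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Optional;

static String readSchemaHistory(final Path path) throws IOException {
  // End of sync: read the Debezium schema history so it can be saved in the Airbyte state.
  return Files.readString(path, StandardCharsets.UTF_8);
}

static void persistSchemaHistory(final Path path, final Optional<String> saved) throws IOException {
  // Start of sync: write the saved history back so Debezium can resume smoothly.
  if (saved.isPresent()) {
    Files.writeString(path, saved.get(), StandardCharsets.UTF_8);
  }
}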

@@ -19,9 +19,9 @@
* https://debezium.io/documentation/reference/1.4/development/converters.html This is built from
* reference with {@link io.debezium.connector.mysql.converters.TinyIntOneToBooleanConverter} If you
* rename this class then remember to rename the datetime.type property value in
* {@link io.airbyte-integrations.source.mysql.MySqlCdcProperties#getDebeziumProperties()} (If you
* don't rename, a test would still fail but it might be tricky to figure out where to change the
* property name)
* io.airbyte-integrations.source.mysql.MySqlCdcProperties#getDebeziumProperties() (If you don't
* rename, a test would still fail but it might be tricky to figure out where to change the property
* name)
*/
public class MySQLConverter implements CustomConverter<SchemaBuilder, RelationalColumn> {
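Debezium registers custom converters through connector properties: the converters property lists a symbolic name, and <name>.type points at the implementing class, which is why renaming this class requires updating the datetime.type value mentioned above. A hedged sketch of that registration; the real values live in MySqlCdcProperties#getDebeziumProperties(), and the fully qualified class name here is an assumption:

import java.util.Properties;

static Properties converterRegistrationSketch() {
  final Properties props = new Properties();
  props.setProperty("converters", "datetime");
  // Must be the converter's fully qualified class name; renaming the class breaks this value.
  props.setProperty("datetime.type", "io.airbyte.integrations.source.mysql.MySQLConverter");
  return props;
}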

@@ -15,9 +15,11 @@

/**
* The base implementation takes care of the following:
* <ul>
* <li>Create shared instance variables.</li>
* <li>Create the bucket and prepare the bucket path.</li>
* <li>Log and close the write.</li>
* </ul>
*/
public abstract class BaseAzureBlobStorageWriter implements AzureBlobStorageWriter {

@@ -76,7 +76,7 @@ protected JsonNode formatRecord(final Schema schema, final AirbyteRecordMessage
}

@Override
public void close(boolean hasFailed) {
public void close(final boolean hasFailed) {
fieldsWithRefDefinition.clear();
super.close(hasFailed);
}
@@ -86,7 +86,7 @@ protected JsonNode formatData(final FieldList fields, final JsonNode root) {
if (fields == null) {
return root;
}
List<String> dateTimeFields = BigQueryUtils.getDateTimeFieldsFromSchema(fields);
final List<String> dateTimeFields = BigQueryUtils.getDateTimeFieldsFromSchema(fields);
if (!dateTimeFields.isEmpty()) {
BigQueryUtils.transformJsonDateTimeToBigDataFormat(dateTimeFields, (ObjectNode) root);
}
@@ -281,8 +281,8 @@ private Set<String> extractJsonValues(final JsonNode node, final String attribut
return resultSet;
}

private JsonNode removeAirbyteMetadataFields(JsonNode record) {
for (String airbyteMetadataField : AIRBYTE_METADATA_FIELDS) {
private JsonNode removeAirbyteMetadataFields(final JsonNode record) {
for (final String airbyteMetadataField : AIRBYTE_METADATA_FIELDS) {
((ObjectNode) record).remove(airbyteMetadataField);
}
return record;
@@ -311,7 +311,7 @@ private static Stream<Arguments> schemaAndDataProvider() {
arguments(getSchema(), MESSAGE_USERS2));
}

private static AirbyteMessage createRecordMessage(String stream, JsonNode data) {
private static AirbyteMessage createRecordMessage(final String stream, final JsonNode data) {
return new AirbyteMessage().withType(AirbyteMessage.Type.RECORD)
.withRecord(new AirbyteRecordMessage().withStream(stream)
.withData(data)
@@ -26,15 +26,18 @@
* This implementation is similar to
* {@link io.airbyte.integrations.destination.jdbc.copy.s3.S3StreamCopier}. The difference is that
* this implementation creates Parquet staging files, instead of CSV ones.
* <p/>
* <p>
* </p>
* It does the following operations:
* <ul>
* <li>1. Parquet writer writes data stream into staging parquet file in
* s3://<bucket-name>/<bucket-path>/<staging-folder>.</li>
* s3://bucket-name/bucket-path/staging-folder.</li>
* <li>2. Create a tmp delta table based on the staging parquet file.</li>
* <li>3. Create the destination delta table based on the tmp delta table schema in
* s3://<bucket>/<stream-name>.</li>
* s3://bucket/stream-name.</li>
* <li>4. Copy the staging parquet file into the destination delta table.</li>
* <li>5. Delete the tmp delta table, and the staging parquet file.</li>
* </ul>
*/
public class DatabricksStreamCopier implements StreamCopier {
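The five steps in the javadoc amount to a stage-then-merge flow. A heavily hedged outline; every method name below is invented for illustration, and only the ordering comes from the comment:

abstract class StreamCopyOutline {

  void copyStream() throws Exception {
    writeStagingParquetFile();        // 1. stream records into s3://bucket-name/bucket-path/staging-folder
    createTmpDeltaTableFromStaging(); // 2. tmp delta table built on the staging parquet file
    createDestinationDeltaTable();    // 3. destination table in s3://bucket/stream-name, using the tmp schema
    copyStagingIntoDestination();     // 4. copy the staging parquet file into the destination delta table
    deleteTmpTableAndStagingFile();   // 5. drop the tmp delta table and delete the staging file
  }

  abstract void writeStagingParquetFile() throws Exception;

  abstract void createTmpDeltaTableFromStaging() throws Exception;

  abstract void createDestinationDeltaTable() throws Exception;

  abstract void copyStagingIntoDestination() throws Exception;

  abstract void deleteTmpTableAndStagingFile() throws Exception;

}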

@@ -28,8 +28,10 @@

/**
* The base implementation takes care of the following:
* <ul>
* <li>Create shared instance variables.</li>
* <li>Create the bucket and prepare the bucket path.</li>
* </ul>
*/
public abstract class BaseGcsWriter implements S3Writer {

@@ -52,8 +54,10 @@ protected BaseGcsWriter(final GcsDestinationConfig config,
}

/**
* <ul>
* <li>1. Create bucket if necessary.</li>
* <li>2. Under OVERWRITE mode, delete all objects with the output prefix.</li>
* </ul>
*/
@Override
public void initialize() {
@@ -30,7 +30,8 @@
* The main function of this class is to convert a JsonSchema to Avro schema. It can also
* standardize schema names, and keep track of a mapping from the original names to the standardized
* ones, which is needed for unit tests.
* <p/>
* <p>
* </p>
* For limitations of this converter, see the README of this connector:
* https://docs.airbyte.io/integrations/destinations/s3#avro
*/
@@ -24,9 +24,11 @@ public static JsonFieldNameUpdater getFieldNameUpdater(final String streamName,

/**
* Convert an Airbyte JsonNode from Avro / Parquet Record to a plain one.
* <ul>
* <li>Remove the airbyte id and emission timestamp fields.</li>
* <li>Remove null fields that must exist in Parquet but does not in original Json.</li> This
* function mutates the input Json.
* <li>Remove null fields that must exist in Parquet but does not in original Json. This function
* mutates the input Json.</li>
* </ul>
*/
public static JsonNode pruneAirbyteJson(final JsonNode input) {
final ObjectNode output = (ObjectNode) input;
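A hedged sketch of the pruning described above, removing the Airbyte bookkeeping fields and any null-valued fields in place; the column names are the usual Airbyte ones and are an assumption here, and the method is a standalone illustration rather than this class's real code:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

static JsonNode pruneAirbyteJsonSketch(final JsonNode input) {
  final ObjectNode output = (ObjectNode) input;
  // Remove the Airbyte id and emission timestamp fields.
  output.remove("_airbyte_ab_id");
  output.remove("_airbyte_emitted_at");
  // Collect null fields first to avoid modifying the node while iterating its field names.
  final List<String> nullFields = new ArrayList<>();
  final Iterator<String> names = output.fieldNames();
  while (names.hasNext()) {
    final String name = names.next();
    if (output.get(name).isNull()) {
      nullFields.add(name);
    }
  }
  nullFields.forEach(output::remove);
  return output;
}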
@@ -17,8 +17,9 @@ public static String getOutputPrefix(final String bucketPath, final AirbyteStrea
}

/**
* Prefix: <bucket-path>/<source-namespace-if-present>/<stream-name>
* Prefix: &lt;bucket-path&gt;/&lt;source-namespace-if-present&gt;/&lt;stream-name&gt;
*/
// Prefix: <bucket-path>/<source-namespace-if-present>/<stream-name>
public static String getOutputPrefix(final String bucketPath, final String namespace, final String streamName) {
final List<String> paths = new LinkedList<>();
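As a concrete reading of that prefix format, with illustrative values not taken from this diff: a bucketPath of data_sync, a namespace of public, and a stream of users would presumably yield the prefix data_sync/public/users, and with no source namespace it would collapse to data_sync/users.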

@@ -28,9 +28,11 @@

/**
* The base implementation takes care of the following:
* <ul>
* <li>Create shared instance variables.</li>
* <li>Create the bucket and prepare the bucket path.</li>
* <li>Log and close the write.</li>
* </ul>
*/
public abstract class BaseS3Writer implements S3Writer {

@@ -57,8 +59,10 @@ public String getOutputPrefix() {
}

/**
* <ul>
* <li>1. Create bucket if necessary.</li>
* <li>2. Under OVERWRITE mode, delete all objects with the output prefix.</li>
* </ul>
*/
@Override
public void initialize() {
@@ -19,8 +19,8 @@
* This class contains helper functions and boilerplate for implementing a source connector for a
* relational DB source.
*
* @see io.airbyte.integrations.source.jdbc.AbstractJdbcSource if you are implementing a relational
* DB which can be accessed via JDBC driver.
* see io.airbyte.integrations.source.jdbc.AbstractJdbcSource if you are implementing a relational
* DB which can be accessed via JDBC driver.
*/
public abstract class AbstractRelationalDbSource<DataType, Database extends SqlDatabase> extends
AbstractDbSource<DataType, Database> implements Source {
