Commit

Improve and add TODO
azexcy committed Oct 16, 2023
1 parent 44a5608 commit d175ee1
Showing 1 changed file with 38 additions and 40 deletions.

@@ -105,10 +105,7 @@ private PipelineJobProgressUpdatedParameter flush(final DataSource dataSource, f
             flushInternal(dataSource, each.getBatchDeleteDataRecords());
             flushInternal(dataSource, each.getBatchInsertDataRecords());
             flushInternal(dataSource, each.getBatchUpdateDataRecords());
-            if (each.getNonBatchRecords().isEmpty()) {
-                continue;
-            }
-            tryFlush(dataSource, each.getNonBatchRecords(), false);
+            sequentialFlush(dataSource, each.getNonBatchRecords());
         }
         return new PipelineJobProgressUpdatedParameter(insertRecordNumber);
     }
@@ -117,14 +114,14 @@ private void flushInternal(final DataSource dataSource, final List<DataRecord> b
         if (null == buffer || buffer.isEmpty()) {
             return;
         }
-        tryFlush(dataSource, buffer, true);
+        tryFlush(dataSource, buffer);
     }
 
     @SneakyThrows(InterruptedException.class)
-    private void tryFlush(final DataSource dataSource, final List<DataRecord> buffer, final boolean enableTransaction) {
+    private void tryFlush(final DataSource dataSource, final List<DataRecord> buffer) {
         for (int i = 0; !Thread.interrupted() && i <= importerConfig.getRetryTimes(); i++) {
             try {
-                doFlush(dataSource, buffer, enableTransaction);
+                doFlush(dataSource, buffer);
                 return;
             } catch (final SQLException ex) {
                 log.error("flush failed {}/{} times.", i, importerConfig.getRetryTimes(), ex);
@@ -136,50 +133,40 @@ private void tryFlush(final DataSource dataSource, final List<DataRecord> buffer
         }
     }
 
-    private void doFlush(final DataSource dataSource, final List<DataRecord> buffer, final boolean enableTransaction) throws SQLException {
+    private void doFlush(final DataSource dataSource, final List<DataRecord> buffer) throws SQLException {
         try (Connection connection = dataSource.getConnection()) {
+            boolean enableTransaction = buffer.size() > 1;
             if (enableTransaction) {
                 connection.setAutoCommit(false);
             }
-            Set<String> dataRecordTypes = buffer.stream().map(DataRecord::getType).collect(Collectors.toSet());
-            if (dataRecordTypes.size() > 1) {
-                for (DataRecord each : buffer) {
-                    execute(connection, Collections.singletonList(each));
-                }
-            } else {
-                execute(connection, buffer);
+            switch (buffer.get(0).getType()) {
+                case IngestDataChangeType.INSERT:
+                    if (null != rateLimitAlgorithm) {
+                        rateLimitAlgorithm.intercept(JobOperationType.INSERT, 1);
+                    }
+                    executeBatchInsert(connection, buffer);
+                    break;
+                case IngestDataChangeType.UPDATE:
+                    if (null != rateLimitAlgorithm) {
+                        rateLimitAlgorithm.intercept(JobOperationType.UPDATE, 1);
+                    }
+                    executeUpdate(connection, buffer);
+                    break;
+                case IngestDataChangeType.DELETE:
+                    if (null != rateLimitAlgorithm) {
+                        rateLimitAlgorithm.intercept(JobOperationType.DELETE, 1);
+                    }
+                    executeBatchDelete(connection, buffer);
+                    break;
+                default:
+                    break;
             }
             if (enableTransaction) {
                 connection.commit();
             }
         }
     }
 
-    private void execute(final Connection connection, final List<DataRecord> buffer) throws SQLException {
-        switch (buffer.get(0).getType()) {
-            case IngestDataChangeType.INSERT:
-                if (null != rateLimitAlgorithm) {
-                    rateLimitAlgorithm.intercept(JobOperationType.INSERT, 1);
-                }
-                executeBatchInsert(connection, buffer);
-                break;
-            case IngestDataChangeType.UPDATE:
-                if (null != rateLimitAlgorithm) {
-                    rateLimitAlgorithm.intercept(JobOperationType.UPDATE, 1);
-                }
-                executeUpdate(connection, buffer);
-                break;
-            case IngestDataChangeType.DELETE:
-                if (null != rateLimitAlgorithm) {
-                    rateLimitAlgorithm.intercept(JobOperationType.DELETE, 1);
-                }
-                executeBatchDelete(connection, buffer);
-                break;
-            default:
-                break;
-        }
-    }
-
     private void executeBatchInsert(final Connection connection, final List<DataRecord> dataRecords) throws SQLException {
         DataRecord dataRecord = dataRecords.get(0);
         String insertSql = importSQLBuilder.buildInsertSQL(getSchemaName(dataRecord.getTableName()), dataRecord);
@@ -261,6 +248,17 @@ private void executeBatchDelete(final Connection connection, final List<DataReco
         }
     }
 
+    private void sequentialFlush(final DataSource dataSource, final List<DataRecord> buffer) {
+        // TODO It would be better to run this in a transaction, but DELETE statements sometimes take no effect when executed inside an explicit PostgreSQL transaction.
+        try {
+            for (DataRecord each : buffer) {
+                doFlush(dataSource, Collections.singletonList(each));
+            }
+        } catch (final SQLException ex) {
+            throw new PipelineImporterJobWriteException(ex);
+        }
+    }
+
     @Override
     public void close() {
         PipelineJdbcUtils.cancelStatement(batchInsertStatement.get());
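
For context on the TODO inside sequentialFlush above: a minimal sketch of the transactional variant it alludes to might look like the following. This is an illustration only, not part of the commit; it assumes the surrounding class from the diff (its DataSource/Connection/DataRecord imports and the PipelineImporterJobWriteException it already throws), and flushOneRecord is a hypothetical stand-in for the per-type dispatch that doFlush performs.

    // Hypothetical sketch only (not part of this commit): the transactional flush the TODO prefers.
    // The committed sequentialFlush flushes each record on its own auto-committed connection instead,
    // because DELETE statements have sometimes taken no effect inside an explicit PostgreSQL transaction.
    private void transactionalFlush(final DataSource dataSource, final List<DataRecord> buffer) {
        try (Connection connection = dataSource.getConnection()) {
            connection.setAutoCommit(false);
            for (DataRecord each : buffer) {
                // flushOneRecord is a hypothetical helper standing in for doFlush's per-type switch
                // (INSERT/UPDATE/DELETE -> executeBatchInsert/executeUpdate/executeBatchDelete).
                flushOneRecord(connection, each);
            }
            connection.commit();
            // A production version would also roll back on failure before the connection is closed.
        } catch (final SQLException ex) {
            throw new PipelineImporterJobWriteException(ex);
        }
    }

The committed code trades the atomicity of a single transaction for per-record auto-commit, which sidesteps the PostgreSQL behavior noted in the TODO.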
