List offsets API implemented #156

Merged
merged 13 commits into from
Dec 12, 2024
9 changes: 9 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,12 @@
# confluent-kafka-javascript v1.0.0

v1.0.0 is a feature release. It is supported for all usage.

## Enhancements

1. Add support for an Admin API to fetch topic offsets (#156).


# confluent-kafka-javascript v0.6.1

v0.6.1 is a limited availability maintenance release. It is supported for all usage.
2 changes: 2 additions & 0 deletions MIGRATION.md
@@ -337,6 +337,8 @@ The admin-client only has support for a limited subset of methods, with more to
and `operationTimeout` option.
* The `fetchTopicMetadata` method is supported with additional `timeout`
and `includeAuthorizedOperations` option. Fetching for all topics is not advisable.
* The `fetchTopicOffsets` method is supported with additional `timeout`
and `isolationLevel` option.

### Using the Schema Registry

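A minimal usage sketch of the migrated call described above, assuming a KafkaJS-style client from this package; the broker address, topic name, and option values are illustrative:

```js
const { Kafka, IsolationLevel } = require('@confluentinc/kafka-javascript').KafkaJS;

async function printWatermarks() {
  const kafka = new Kafka({ kafkaJS: { brokers: ['localhost:9092'] } });
  const admin = kafka.admin();
  await admin.connect();
  try {
    // One entry per partition: { partition, offset, high, low }, offsets as strings.
    const offsets = await admin.fetchTopicOffsets('my-topic', {
      timeout: 5000,                                 // overall request budget in milliseconds
      isolationLevel: IsolationLevel.READ_COMMITTED, // default is READ_UNCOMMITTED
    });
    console.log(offsets);
  } finally {
    await admin.disconnect();
  }
}

printWatermarks();
```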
77 changes: 77 additions & 0 deletions examples/kafkajs/admin/fetch-topic-offsets.js
@@ -0,0 +1,77 @@
const { Kafka, IsolationLevel } = require('@confluentinc/kafka-javascript').KafkaJS;
const { parseArgs } = require('node:util');

async function fetchOffsets() {
// Parse command-line arguments
const args = parseArgs({
allowPositionals: true,
options: {
'bootstrap-servers': {
type: 'string',
short: 'b',
default: 'localhost:9092',
},
'timeout': {
type: 'string',
short: 't',
default: '5000',
},
'isolation-level': {
type: 'string',
short: 'i',
default: '0', // Default to '0' for read_uncommitted
},
},
});

const {
'bootstrap-servers': bootstrapServers,
timeout,
'isolation-level': isolationLevel,
} = args.values;

const [topic] = args.positionals;

if (!topic) {
console.error('Topic name is required');
process.exit(1);
}

// Determine the isolation level
let isolationLevelValue;
if (isolationLevel === '0') {
isolationLevelValue = IsolationLevel.READ_UNCOMMITTED;
} else if (isolationLevel === '1') {
isolationLevelValue = IsolationLevel.READ_COMMITTED;
} else {
console.error('Invalid isolation level. Use 0 for READ_UNCOMMITTED or 1 for READ_COMMITTED.');
process.exit(1);
}

const kafka = new Kafka({
kafkaJS: {
brokers: [bootstrapServers],
},
});

const admin = kafka.admin();
await admin.connect();

try {
// Fetch offsets for the specified topic
const offsets = await admin.fetchTopicOffsets(
topic,
{
isolationLevel: isolationLevelValue, // Use determined isolation level
timeout: Number(timeout), // Convert timeout to a number
});

console.log(`Offsets for topic "${topic}":`, JSON.stringify(offsets, null, 2));
} catch (err) {
console.error('Error fetching topic offsets:', err);
} finally {
await admin.disconnect();
}
}

fetchOffsets();
83 changes: 83 additions & 0 deletions lib/admin.js
@@ -38,11 +38,52 @@ const AclOperationTypes = Object.seal({
IDEMPOTENT_WRITE: 12,
});

/**
* A list of isolation levels.
* @enum {number}
* @readonly
* @memberof RdKafka
*/
const IsolationLevel = {
READ_UNCOMMITTED: 0,
READ_COMMITTED: 1,
};

/**
* Define an OffsetSpec to list offsets at.
* Either a timestamp can be used, or one of the special, pre-defined values
* (EARLIEST, LATEST, MAX_TIMESTAMP) when passing an OffsetSpec to listOffsets.
* @param {number} timestamp - The timestamp to list offsets at.
* @constructor
*/
function OffsetSpec(timestamp) {
this.timestamp = timestamp;
}

/**
* Special OffsetSpec value used to retrieve the offset with the largest timestamp of a partition.
* Because message timestamps can be set client-side, this may not match
* the log end offset returned by OffsetSpec.LATEST.
*/
OffsetSpec.MAX_TIMESTAMP = new OffsetSpec(-3);

/**
* Special OffsetSpec value denoting the earliest offset for a topic partition.
*/
OffsetSpec.EARLIEST = new OffsetSpec(-2);

/**
* Special OffsetSpec value denoting the latest offset for a topic partition.
*/
OffsetSpec.LATEST = new OffsetSpec(-1);

module.exports = {
create: createAdminClient,
createFrom: createAdminClientFrom,
ConsumerGroupStates,
AclOperationTypes,
IsolationLevel: Object.freeze(IsolationLevel),
OffsetSpec,
};

var Client = require('./client');
@@ -609,3 +650,45 @@ AdminClient.prototype.describeTopics = function (topics, options, cb) {
}
});
};

/**
* List offsets for topic partition(s).
*
* @param {Array<{topic: string, partition: number, offset: OffsetSpec}>} partitions - The list of partitions to fetch offsets for.
* @param {any?} options
* @param {number?} options.timeout - The request timeout in milliseconds.
* May be unset (default: 5000)
* @param {RdKafka.IsolationLevel?} options.isolationLevel - The isolation level for reading the offsets.
* (default: READ_UNCOMMITTED)
* @param {function} cb - The callback to be executed when finished.
*/
AdminClient.prototype.listOffsets = function (partitions, options, cb) {
if (!this._isConnected) {
throw new Error('Client is disconnected');
}

if (!options) {
options = {};
}

if (!Object.hasOwn(options, 'timeout')) {
options.timeout = 5000;
}

if (!Object.hasOwn(options, 'isolationLevel')) {
options.isolationLevel = IsolationLevel.READ_UNCOMMITTED;
}

this._client.listOffsets(partitions, options, function (err, offsets) {
if (err) {
if (cb) {
cb(LibrdKafkaError.create(err));
}
return;
}

if (cb) {
cb(null, offsets);
}
});
};
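The callback-based client now exposes `listOffsets` directly. Below is a sketch of how it might be called with the new `OffsetSpec` values; it assumes an already-connected rdkafka-style AdminClient created elsewhere, and the topic, partition, and timestamp values are purely illustrative:

```js
const Kafka = require('@confluentinc/kafka-javascript');
const { OffsetSpec, IsolationLevel } = Kafka;

// `admin` is assumed to be an already-connected callback-style AdminClient.
function printOffsets(admin) {
  const partitions = [
    { topic: 'my-topic', partition: 0, offset: OffsetSpec.LATEST },
    { topic: 'my-topic', partition: 0, offset: OffsetSpec.EARLIEST },
    // A concrete timestamp (ms) asks for the earliest offset at or after that time.
    { topic: 'my-topic', partition: 1, offset: new OffsetSpec(1700000000000) },
  ];

  admin.listOffsets(
    partitions,
    { timeout: 5000, isolationLevel: IsolationLevel.READ_UNCOMMITTED },
    (err, offsets) => {
      if (err) {
        console.error('listOffsets failed:', err);
        return;
      }
      console.log(offsets);
    });
}
```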
123 changes: 122 additions & 1 deletion lib/kafkajs/_admin.js
@@ -1,3 +1,4 @@
const { OffsetSpec } = require('../admin');
const RdKafka = require('../rdkafka');
const { kafkaJSToRdKafkaConfig,
createKafkaJsErrorFromLibRdKafkaError,
@@ -10,6 +11,7 @@ const { kafkaJSToRdKafkaConfig,
severityToLogLevel,
} = require('./_common');
const error = require('./_error');
const { hrtime } = require('process');

/**
* NOTE: The Admin client is currently in an experimental state with many
@@ -666,10 +668,129 @@ class Admin {
});
});
}

/**
* Fetch the earliest and latest offsets for every partition of the specified topic.
*
* @param {string} topic - The topic to fetch offsets for.
* @param {object?} options
* @param {number?} options.timeout - The request timeout in milliseconds.
* May be unset (default: 5000)
* @param {KafkaJS.IsolationLevel?} options.isolationLevel - The isolation level for reading the offsets.
* (default: READ_UNCOMMITTED)
*
* @returns {Promise<Array<{partition: number, offset: string, high: string, low: string}>>}
*/
async fetchTopicOffsets(topic, options = {}) {
if (this.#state !== AdminState.CONNECTED) {
throw new error.KafkaJSError("Admin client is not connected.", { code: error.ErrorCodes.ERR__STATE });
}

if (!Object.hasOwn(options, 'timeout')) {
options.timeout = 5000;
}

let topicData;
let startTime, endTime, timeTaken;

try {
// Measure time taken for fetchTopicMetadata
startTime = hrtime.bigint();
topicData = await this.fetchTopicMetadata({ topics: [topic], timeout: options.timeout });
endTime = hrtime.bigint();
timeTaken = Number(endTime - startTime) / 1e6; // Convert nanoseconds to milliseconds

// Adjust timeout for the next request
options.timeout -= timeTaken;
if (options.timeout <= 0) {
throw new error.KafkaJSError("Timeout exceeded while fetching topic metadata.", { code: error.ErrorCodes.ERR__TIMED_OUT });
}
} catch (err) {
throw createKafkaJsErrorFromLibRdKafkaError(err);
}

const partitionIds = topicData.flatMap(topic =>
topic.partitions.map(partition => partition.partitionId)
);

const topicPartitionOffsetsLatest = partitionIds.map(partitionId => ({
topic,
partition: partitionId,
offset: OffsetSpec.LATEST
}));

const topicPartitionOffsetsEarliest = partitionIds.map(partitionId => ({
topic,
partition: partitionId,
offset: OffsetSpec.EARLIEST
}));

try {
// Measure time taken for listOffsets (latest)
startTime = hrtime.bigint();
const latestOffsets = await this.#listOffsets(topicPartitionOffsetsLatest, options);
endTime = hrtime.bigint();
timeTaken = Number(endTime - startTime) / 1e6; // Convert nanoseconds to milliseconds

// Adjust timeout for the next request
options.timeout -= timeTaken;
if (options.timeout <= 0) {
throw new error.KafkaJSError("Timeout exceeded while fetching latest offsets.", { code: error.ErrorCodes.ERR__TIMED_OUT });
}

// Measure time taken for listOffsets (earliest)
startTime = hrtime.bigint();
const earliestOffsets = await this.#listOffsets(topicPartitionOffsetsEarliest, options);
endTime = hrtime.bigint();
timeTaken = Number(endTime - startTime) / 1e6; // Convert nanoseconds to milliseconds

// Adjust timeout for the next request
options.timeout -= timeTaken;
if (options.timeout <= 0) {
throw new error.KafkaJSError("Timeout exceeded while fetching earliest offsets.", { code: error.ErrorCodes.ERR__TIMED_OUT });
}

const combinedResults = partitionIds.map(partitionId => {
const latest = latestOffsets.find(offset => offset.partition === partitionId);
const earliest = earliestOffsets.find(offset => offset.partition === partitionId);

return {
partition: partitionId,
offset: latest.offset.toString(),
high: latest.offset.toString(),
low: earliest.offset.toString()
};
});

return combinedResults;
} catch (err) {
throw createKafkaJsErrorFromLibRdKafkaError(err);
}
}

#listOffsets(partitionOffsets, options) {
return new Promise((resolve, reject) => {
this.#internalClient.listOffsets(partitionOffsets, options, (err, offsets) => {
if (err) {
reject(createKafkaJsErrorFromLibRdKafkaError(err));
} else {
resolve(offsets);
}
});
});
}
}

module.exports = {
Admin,
ConsumerGroupStates: RdKafka.AdminClient.ConsumerGroupStates,
AclOperationTypes: RdKafka.AdminClient.AclOperationTypes
AclOperationTypes: RdKafka.AdminClient.AclOperationTypes,
/**
* A list of isolation levels.
* @enum {number}
* @readonly
* @memberof KafkaJS
* @see RdKafka.IsolationLevel
*/
IsolationLevel: RdKafka.AdminClient.IsolationLevel
};
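`fetchTopicOffsets` spends its single `options.timeout` budget across the metadata lookup and the two internal `listOffsets` calls (latest, then earliest), and merges the two answers per partition. A sketch of the resulting shape, with purely illustrative values:

```js
// Hypothetical result for a two-partition topic: `high` mirrors the latest
// offset, `low` the earliest, and `offset` repeats `high` for KafkaJS parity.
const exampleResult = [
  { partition: 0, offset: '42', high: '42', low: '0' },
  { partition: 1, offset: '17', high: '17', low: '3' },
];
```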
5 changes: 3 additions & 2 deletions lib/kafkajs/_kafka.js
@@ -1,6 +1,6 @@
const { Producer, CompressionTypes } = require('./_producer');
const { Consumer, PartitionAssigners } = require('./_consumer');
const { Admin, ConsumerGroupStates, AclOperationTypes } = require('./_admin');
const { Admin, ConsumerGroupStates, AclOperationTypes, IsolationLevel } = require('./_admin');
const error = require('./_error');
const { logLevel, checkIfKafkaJsKeysPresent, CompatibilityErrorMessages } = require('./_common');

@@ -87,4 +87,5 @@ module.exports = {
PartitionAssignors: PartitionAssigners,
CompressionTypes,
ConsumerGroupStates,
AclOperationTypes };
AclOperationTypes,
IsolationLevel };
4 changes: 4 additions & 0 deletions lib/rdkafka.js
@@ -31,4 +31,8 @@ module.exports = {
Topic: Topic,
features: features,
librdkafkaVersion: lib.librdkafkaVersion,
IsolationLevel: Admin.IsolationLevel,
OffsetSpec: Admin.OffsetSpec,
ConsumerGroupStates: Admin.ConsumerGroupStates,
AclOperationTypes: Admin.AclOperationTypes,
};
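With these re-exports, the new enums are also reachable from the package's top-level, node-rdkafka-style entry point. A small sketch, assuming the default export corresponds to lib/rdkafka.js:

```js
const Kafka = require('@confluentinc/kafka-javascript');

console.log(Kafka.IsolationLevel.READ_COMMITTED);      // 1
console.log(Kafka.OffsetSpec.LATEST.timestamp);         // -1
console.log(Kafka.OffsetSpec.MAX_TIMESTAMP.timestamp);  // -3
```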