From b58dd5b42a45547f22ced6440f916cdb2291345e Mon Sep 17 00:00:00 2001 From: dariakp <81593090+dariakp@users.noreply.github.com> Date: Wed, 6 Nov 2024 22:53:42 +0000 Subject: [PATCH] [create-pull-request] automated change --- docs/404.html | 32 +- docs/Next/assets/navigation.js | 2 +- docs/Next/assets/search.js | 2 +- docs/Next/classes/AbstractCursor.html | 46 +- docs/Next/classes/Admin.html | 16 +- docs/Next/classes/AggregationCursor.html | 150 +++--- docs/Next/classes/Batch.html | 4 +- docs/Next/classes/BulkOperationBase.html | 10 +- docs/Next/classes/BulkWriteResult.html | 32 +- docs/Next/classes/ChangeStream.html | 52 +- docs/Next/classes/ClientEncryption.html | 28 +- docs/Next/classes/ClientSession.html | 42 +- docs/Next/classes/Collection.html | 113 ++-- docs/Next/classes/Db.html | 57 +- docs/Next/classes/ExplainableCursor.html | 510 ++++++++++++++++++ docs/Next/classes/FindCursor.html | 158 +++--- docs/Next/classes/FindOperators.html | 22 +- docs/Next/classes/GridFSBucket.html | 26 +- docs/Next/classes/GridFSBucketReadStream.html | 10 +- .../Next/classes/GridFSBucketWriteStream.html | 34 +- docs/Next/classes/HostAddress.html | 4 +- docs/Next/classes/ListCollectionsCursor.html | 44 +- docs/Next/classes/ListIndexesCursor.html | 46 +- .../Next/classes/ListSearchIndexesCursor.html | 76 +-- docs/Next/classes/MongoBulkWriteError.html | 22 +- docs/Next/classes/MongoClient.html | 58 +- .../Next/classes/MongoCompatibilityError.html | 6 +- .../classes/MongoCursorExhaustedError.html | 6 +- docs/Next/classes/MongoCursorInUseError.html | 6 +- .../classes/MongoDBCollectionNamespace.html | 8 +- docs/Next/classes/MongoDBNamespace.html | 8 +- docs/Next/classes/MongoDriverError.html | 2 +- .../classes/MongoInvalidArgumentError.html | 6 +- .../classes/MongoMissingCredentialsError.html | 6 +- .../classes/MongoMissingDependencyError.html | 10 +- docs/Next/classes/MongoNetworkError.html | 6 +- .../classes/MongoNetworkTimeoutError.html | 6 +- .../classes/MongoOperationTimeoutError.html | 31 ++ docs/Next/classes/MongoParseError.html | 6 +- docs/Next/classes/MongoRuntimeError.html | 2 +- docs/Next/classes/MongoServerClosedError.html | 6 +- .../classes/MongoServerSelectionError.html | 8 +- docs/Next/classes/MongoSystemError.html | 8 +- .../classes/MongoTopologyClosedError.html | 6 +- docs/Next/classes/MongoWriteConcernError.html | 8 +- docs/Next/classes/OrderedBulkOperation.html | 8 +- docs/Next/classes/RunCommandCursor.html | 51 +- docs/Next/classes/ServerCapabilities.html | 4 +- docs/Next/classes/ServerDescription.html | 14 +- docs/Next/classes/ServerSession.html | 6 +- docs/Next/classes/UnorderedBulkOperation.html | 8 +- docs/Next/classes/WriteConcern.html | 20 +- docs/Next/classes/WriteConcernError.html | 10 +- docs/Next/classes/WriteError.html | 14 +- docs/Next/hierarchy.html | 2 +- .../interfaces/AWSEncryptionKeyOptions.html | 8 +- .../interfaces/AbstractCursorOptions.html | 37 +- docs/Next/interfaces/AggregateOptions.html | 38 +- .../interfaces/AggregationCursorOptions.html | 57 +- .../interfaces/AzureEncryptionKeyOptions.html | 8 +- .../interfaces/BulkWriteOperationError.html | 4 +- docs/Next/interfaces/BulkWriteOptions.html | 33 +- .../ChangeStreamCollModDocument.html | 16 +- .../ChangeStreamCreateDocument.html | 16 +- .../ChangeStreamCreateIndexDocument.html | 18 +- .../ChangeStreamDeleteDocument.html | 22 +- .../ChangeStreamDocumentCollectionUUID.html | 4 +- .../ChangeStreamDocumentCommon.html | 12 +- .../interfaces/ChangeStreamDocumentKey.html | 4 +- ...ngeStreamDocumentOperationDescription.html 
| 4 +- .../ChangeStreamDropDatabaseDocument.html | 16 +- .../interfaces/ChangeStreamDropDocument.html | 18 +- .../ChangeStreamDropIndexDocument.html | 18 +- .../ChangeStreamInsertDocument.html | 22 +- .../ChangeStreamInvalidateDocument.html | 14 +- .../interfaces/ChangeStreamNameSpace.html | 4 +- docs/Next/interfaces/ChangeStreamOptions.html | 48 +- ...treamRefineCollectionShardKeyDocument.html | 18 +- .../ChangeStreamRenameDocument.html | 20 +- .../ChangeStreamReplaceDocument.html | 22 +- ...ChangeStreamReshardCollectionDocument.html | 18 +- .../ChangeStreamShardCollectionDocument.html | 18 +- .../interfaces/ChangeStreamSplitEvent.html | 6 +- .../ChangeStreamUpdateDocument.html | 26 +- .../interfaces/ClientBulkWriteOptions.html | 27 +- ...ncryptionCreateDataKeyProviderOptions.html | 6 +- .../ClientEncryptionEncryptOptions.html | 14 +- .../interfaces/ClientEncryptionOptions.html | 24 +- ...ptionRewrapManyDataKeyProviderOptions.html | 4 +- ...ientEncryptionRewrapManyDataKeyResult.html | 4 +- .../Next/interfaces/ClientSessionOptions.html | 11 +- .../ClusteredCollectionOptions.html | 4 +- docs/Next/interfaces/CollationOptions.html | 4 +- docs/Next/interfaces/CollectionInfo.html | 4 +- docs/Next/interfaces/CollectionOptions.html | 8 +- .../interfaces/CommandOperationOptions.html | 27 +- docs/Next/interfaces/ConnectionOptions.html | 16 +- .../interfaces/ConnectionPoolOptions.html | 26 +- .../interfaces/CountDocumentsOptions.html | 40 +- docs/Next/interfaces/CountOptions.html | 32 +- .../interfaces/CreateCollectionOptions.html | 59 +- .../Next/interfaces/CreateIndexesOptions.html | 49 +- docs/Next/interfaces/CursorStreamOptions.html | 4 +- docs/Next/interfaces/DataKey.html | 4 +- docs/Next/interfaces/DbOptions.html | 8 +- docs/Next/interfaces/DbStatsOptions.html | 29 +- docs/Next/interfaces/DeleteManyModel.html | 8 +- docs/Next/interfaces/DeleteOneModel.html | 8 +- docs/Next/interfaces/DeleteOptions.html | 31 +- docs/Next/interfaces/DeleteResult.html | 6 +- docs/Next/interfaces/DeleteStatement.html | 10 +- .../interfaces/DropCollectionOptions.html | 29 +- docs/Next/interfaces/EndSessionOptions.html | 6 +- .../EstimatedDocumentCountOptions.html | 26 +- .../interfaces/ExplainCommandOptions.html | 6 +- docs/Next/interfaces/ExplainOptions.html | 4 +- .../interfaces/FindOneAndDeleteOptions.html | 35 +- .../interfaces/FindOneAndReplaceOptions.html | 41 +- .../interfaces/FindOneAndUpdateOptions.html | 41 +- docs/Next/interfaces/FindOptions.html | 84 +-- .../interfaces/GCPEncryptionKeyOptions.html | 14 +- docs/Next/interfaces/GridFSBucketOptions.html | 13 +- .../GridFSBucketReadStreamOptions.html | 8 +- ...FSBucketReadStreamOptionsWithRevision.html | 10 +- .../GridFSBucketWriteStreamOptions.html | 16 +- docs/Next/interfaces/GridFSChunk.html | 4 +- docs/Next/interfaces/GridFSFile.html | 8 +- docs/Next/interfaces/IndexDescription.html | 24 +- .../interfaces/IndexInformationOptions.html | 39 +- docs/Next/interfaces/InsertManyResult.html | 8 +- docs/Next/interfaces/InsertOneModel.html | 4 +- docs/Next/interfaces/InsertOneOptions.html | 31 +- docs/Next/interfaces/InsertOneResult.html | 6 +- .../interfaces/KMIPEncryptionKeyOptions.html | 8 +- .../interfaces/ListCollectionsOptions.html | 31 +- .../Next/interfaces/ListDatabasesOptions.html | 33 +- docs/Next/interfaces/ListDatabasesResult.html | 4 +- docs/Next/interfaces/MongoClientOptions.html | 112 ++-- .../interfaces/MongoNetworkErrorOptions.html | 4 +- docs/Next/interfaces/MongoOptions.html | 69 +-- docs/Next/interfaces/MonitorOptions.html | 12 +- 
docs/Next/interfaces/OperationOptions.html | 12 +- docs/Next/interfaces/ProxyOptions.html | 4 +- docs/Next/interfaces/RangeOptions.html | 10 +- docs/Next/interfaces/RenameOptions.html | 31 +- docs/Next/interfaces/ReplaceOneModel.html | 12 +- docs/Next/interfaces/ReplaceOptions.html | 33 +- docs/Next/interfaces/ResumeOptions.html | 4 +- .../interfaces/SearchIndexDescription.html | 8 +- .../TimeSeriesCollectionOptions.html | 4 +- docs/Next/interfaces/TransactionOptions.html | 45 +- docs/Next/interfaces/UpdateDescription.html | 10 +- docs/Next/interfaces/UpdateManyModel.html | 14 +- docs/Next/interfaces/UpdateOneModel.html | 14 +- docs/Next/interfaces/UpdateOptions.html | 33 +- docs/Next/interfaces/UpdateResult.html | 12 +- docs/Next/interfaces/UpdateStatement.html | 16 +- .../interfaces/ValidateCollectionOptions.html | 29 +- .../interfaces/WriteConcernErrorData.html | 4 +- .../interfaces/WriteConcernErrorResult.html | 4 +- .../Next/interfaces/WriteConcernSettings.html | 15 +- docs/Next/modules.html | 8 + docs/Next/types/AbstractCursorEvents.html | 2 +- docs/Next/types/AnyBulkWriteOperation.html | 2 +- docs/Next/types/BatchType.html | 2 +- docs/Next/types/CSFLEKMSTlsOptions.html | 2 +- docs/Next/types/ChangeStreamDocument.html | 2 +- docs/Next/types/ChangeStreamEvents.html | 5 +- .../types/ClientEncryptionSocketOptions.html | 2 +- .../types/ClientEncryptionTlsOptions.html | 2 +- docs/Next/types/ClientSessionEvents.html | 2 +- docs/Next/types/ConnectionEvents.html | 2 +- docs/Next/types/CursorFlag.html | 2 +- docs/Next/types/CursorTimeoutMode.html | 1 + docs/Next/types/DistinctOptions.html | 2 +- docs/Next/types/DropDatabaseOptions.html | 2 +- docs/Next/types/DropIndexesOptions.html | 2 +- docs/Next/types/EventEmitterWithState.html | 2 +- docs/Next/types/ExplainVerbosity.html | 2 +- docs/Next/types/ExplainVerbosityLike.html | 2 +- docs/Next/types/GridFSBucketEvents.html | 2 +- docs/Next/types/Hint.html | 2 +- docs/Next/types/IndexDescriptionCompact.html | 2 +- docs/Next/types/IndexDescriptionInfo.html | 2 +- docs/Next/types/IndexDirection.html | 2 +- docs/Next/types/IndexSpecification.html | 2 +- docs/Next/types/ListIndexesOptions.html | 2 +- docs/Next/types/MongoClientEvents.html | 2 +- docs/Next/types/OperationTime.html | 2 +- docs/Next/types/ProfilingLevel.html | 2 +- docs/Next/types/ProfilingLevelOptions.html | 2 +- docs/Next/types/RemoveUserOptions.html | 2 +- docs/Next/types/ResumeToken.html | 2 +- docs/Next/types/ReturnDocument.html | 2 +- docs/Next/types/RunCommandOptions.html | 5 +- docs/Next/types/RunCursorCommandOptions.html | 19 +- docs/Next/types/ServerEvents.html | 2 +- docs/Next/types/ServerSessionId.html | 2 +- docs/Next/types/SetProfilingLevelOptions.html | 2 +- docs/Next/types/TopologyEvents.html | 2 +- docs/Next/types/WithSessionCallback.html | 2 +- docs/Next/types/WithTransactionCallback.html | 2 +- docs/Next/variables/BatchType-1.html | 2 +- docs/Next/variables/CURSOR_FLAGS.html | 2 +- docs/Next/variables/CursorTimeoutMode-1.html | 15 + docs/Next/variables/ExplainVerbosity-1.html | 2 +- docs/Next/variables/ProfilingLevel-1.html | 2 +- docs/Next/variables/ReturnDocument-1.html | 2 +- docs/categories/index.xml | 5 +- docs/index.html | 34 +- docs/index.xml | 5 +- docs/tags/index.xml | 5 +- 212 files changed, 2463 insertions(+), 1659 deletions(-) create mode 100644 docs/Next/classes/ExplainableCursor.html create mode 100644 docs/Next/classes/MongoOperationTimeoutError.html create mode 100644 docs/Next/types/CursorTimeoutMode.html create mode 100644 
docs/Next/variables/CursorTimeoutMode-1.html diff --git a/docs/404.html b/docs/404.html index 1a8ae099da8..ef939e8cf63 100644 --- a/docs/404.html +++ b/docs/404.html @@ -7,16 +7,16 @@ - + 404 Page not found - - - - - - + + + + + + @@ -26,8 +26,8 @@
@@ -81,13 +81,13 @@

404: Page not found

- - - - - - - + + + + + + +

Class AbstractCursor<TSchema, CursorEvents> Abstract

Type Parameters

Hierarchy (view full)

Implements

Properties

[asyncDispose] +AbstractCursor | mongodb

Class AbstractCursor<TSchema, CursorEvents> Abstract

Type Parameters

Hierarchy (view full)

Implements

Properties

[asyncDispose]: (() => Promise<void>)

An alias for AbstractCursor.close().

-
captureRejections: boolean

Value: boolean

+
captureRejections: boolean

Value: boolean

Change the default captureRejections option on all new EventEmitter objects.

v13.4.0, v12.16.0

captureRejectionSymbol: typeof captureRejectionSymbol

Value: Symbol.for('nodejs.rejection')

See how to write a custom rejection handler.

v13.4.0, v12.16.0

-
CLOSE: "close" = ...
defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single +

CLOSE: "close" = ...
defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single event. This limit can be changed for individual EventEmitter instances using the emitter.setMaxListeners(n) method. To change the default for all EventEmitter instances, the events.defaultMaxListeners property @@ -90,15 +90,15 @@ regular 'error' listener is installed.

v13.6.0, v12.17.0

Accessors

  • get closed(): boolean
  • The cursor is closed and all remaining locally buffered documents have been iterated.

    -

    Returns boolean

  • get id(): undefined | Long
  • The cursor has no id until it receives a response from the initial cursor creating command.

    It is non-zero for as long as the database has an open cursor.

    The initiating command may receive a zero id if the entire result is in the firstBatch.

    -

    Returns undefined | Long

  • get killed(): boolean
  • A killCursors command was attempted on this cursor. This is performed if the cursor id is non-zero.

    -

    Returns boolean

Methods

  • Type Parameters

    • K

    Parameters

    • error: Error
    • event: string | symbol
    • Rest...args: AnyRest

    Returns void

Methods

  • Type Parameters

    • K

    Parameters

    • error: Error
    • event: string | symbol
    • Rest...args: AnyRest

    Returns void

  • Add a cursor flag to the cursor

    Parameters

    • flag:
          | "tailable"
          | "oplogReplay"
          | "noCursorTimeout"
          | "awaitData"
          | "exhaust"
          | "partial"

      The flag to set; must be one of the following: 'tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', 'partial'.

    • value: boolean

      The flag boolean value.

      -

    Returns this
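
As a brief illustration, a minimal sketch of setting a flag before iterating a find cursor; the collection handle and the filter used here are assumptions, not part of the API description above:

    // Assumes an already-connected collection handle.
    const cursor = collection.find({ status: 'active' });
    // Keep the server-side cursor alive beyond the default idle timeout.
    cursor.addCursorFlag('noCursorTimeout', true);
    for await (const doc of cursor) {
      console.log(doc);
    }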

  • Alias for emitter.on(eventName, listener).

    +

Returns this

  • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments +

Returns this

  • Frees any client-side resources used by the cursor.

    +

    Parameters

    • Optionaloptions: {
          timeoutMS?: number;
      }
      • OptionaltimeoutMS?: number

    Returns Promise<void>
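
A small usage sketch, assuming a cursor obtained from an earlier find or aggregate call; the timeoutMS value shown is only an example of the optional parameter above:

    // Release client-side resources when the cursor is no longer needed,
    // optionally bounding how long the cleanup may take:
    await cursor.close({ timeoutMS: 1000 });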

  • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

    Returns true if the event had listeners, false otherwise.

    import { EventEmitter } from 'node:events';
    const myEmitter = new EventEmitter();

    // First listener
    myEmitter.on('event', function firstListener() {
    console.log('Helloooo! first listener');
    });
    // Second listener
    myEmitter.on('event', function secondListener(arg1, arg2) {
    console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
    });
    // Third listener
    myEmitter.on('event', function thirdListener(...args) {
    const parameters = args.join(', ');
    console.log(`event with parameters ${parameters} in third listener`);
    });

    console.log(myEmitter.listeners('event'));

    myEmitter.emit('event', 1, 2, 3, 4, 5);

    // Prints:
    // [
    // [Function: firstListener],
    // [Function: secondListener],
    // [Function: thirdListener]
    // ]
    // Helloooo! first listener
    // event with parameters 1, 2 in second listener
    // event with parameters 1, 2, 3, 4, 5 in third listener @@ -128,10 +128,10 @@
      • (doc): boolean | void
      • Parameters

        Returns boolean | void

Returns Promise<void>

  • Will be removed in a future release. Use for await...of instead.
-
  • Returns the number of listeners listening for the event named eventName. If listener is provided, it will return how many times the listener is found in the list of the listeners of the event.

    Type Parameters

    • EventKey extends string | number | symbol

    Parameters

    Returns number

    v3.2.0

    @@ -161,10 +161,10 @@
    const cursor: FindCursor<Document> = coll.find();
    const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
    const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
    -
  • Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)

    +
  • Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)

    Parameters

    • value: number

      Number of milliseconds to wait before aborting the query.

      -

    Returns this
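
For example, a hedged sketch that caps server-side execution time for a query; the collection handle and filter are placeholders:

    // Ask the server to abort the query if it runs longer than 5 seconds.
    const docs = await collection
      .find({ category: 'reports' })
      .maxTimeMS(5000)
      .toArray();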

  • Alias for emitter.removeListener().

    +

Returns this

  • Removes all listeners, or those of the specified eventName.

    It is bad practice to remove listeners added elsewhere in the code, particularly when the EventEmitter instance was created by some other component or module (e.g. sockets or file streams).

    @@ -405,21 +405,21 @@
  • Rewind this cursor to its uninitialized state. Any options that are present on the cursor will remain in effect. Iterating this cursor will cause new queries to be sent to the server, even if the resultant data has already been retrieved by this cursor.

    -

    Returns void

  • By default EventEmitters will print a warning if more than 10 listeners are added for a particular event. This is a useful default that helps finding memory leaks. The emitter.setMaxListeners() method allows the limit to be modified for this specific EventEmitter instance. The value can be set to Infinity (or 0) to indicate an unlimited number of listeners.

    Returns a reference to the EventEmitter, so that calls can be chained.

    Parameters

    • n: number

    Returns this

    v0.3.5

    -
  • Returns an array of documents. The caller is responsible for making sure that there is enough memory to store the results. Note that the array only contains partial results when this cursor had been previously accessed. In that case, cursor.rewind() can be used to reset the cursor.

    -

    Returns Promise<TSchema[]>
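
A short sketch of the toArray/rewind interaction described above, assuming an existing collection handle:

    const cursor = collection.find({});
    const firstPass = await cursor.toArray();  // drains the cursor
    cursor.rewind();                           // reset to the uninitialized state
    const secondPass = await cursor.toArray(); // issues a new query to the server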

  • Experimental

    Listens once to the abort event on the provided signal.

    +

Returns this

Methods

  • Execute a command

    +

Returns Promise<Document>

  • Execute a command

    The driver will ensure the following fields are attached to the command sent to the server:

    • lsid - sourced from an implicit session or options.session
    • @@ -38,18 +38,18 @@
  • Optionaloptions: RunCommandOptions

    Optional settings for the command

Returns Promise<Document>
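
As an illustration, a minimal sketch of running an arbitrary command through the Admin API; the connection string is a placeholder:

    import { MongoClient } from 'mongodb';

    const client = new MongoClient('mongodb://localhost:27017'); // placeholder URI
    const admin = client.db().admin();
    const result = await admin.command({ ping: 1 });
    console.log(result.ok); // 1 on success
    await client.close();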

  • Ping the MongoDB server and retrieve results

    +

Returns Promise<ListDatabasesResult>

  • Remove a user from a database

    +

Returns Promise<Document>

  • Remove a user from a database

    Parameters

    Returns Promise<boolean>

  • Get ReplicaSet status

    +

Returns Promise<boolean>

  • Retrieve the server build information

    +

Returns Promise<Document>

  • Retrieve this db's server status.

    +

Returns Promise<Document>

  • Validate an existing collection

    +

Returns Promise<Document>

  • Validate an existing collection

    Parameters

    • collectionName: string

      The name of the collection to validate.

    • options: ValidateCollectionOptions = {}

      Optional settings for the command

      -

    Returns Promise<Document>

+

Returns Promise<Document>
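
A hedged example of the validateCollection call documented above; the client handle and the 'users' collection name are assumptions:

    const admin = client.db().admin();
    const report = await admin.validateCollection('users');
    console.log(report.valid); // true when no validation errors were found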

diff --git a/docs/Next/classes/AggregationCursor.html b/docs/Next/classes/AggregationCursor.html index 746a1e071ab..01a3cb62673 100644 --- a/docs/Next/classes/AggregationCursor.html +++ b/docs/Next/classes/AggregationCursor.html @@ -2,7 +2,7 @@ allowing for iteration over the results returned from the underlying query. It supports one by one document iteration, conversion to an array or can be iterated as a Node 4.X or higher stream

-

Type Parameters

Hierarchy (view full)

Properties

Type Parameters

  • TSchema = any

Hierarchy (view full)

Properties

[asyncDispose]: (() => Promise<void>)

An alias for AbstractCursor.close().

-
pipeline: Document[]
captureRejections: boolean

Value: boolean

+
pipeline: Document[]
captureRejections: boolean

Value: boolean

Change the default captureRejections option on all new EventEmitter objects.

v13.4.0, v12.16.0

-
captureRejectionSymbol: typeof captureRejectionSymbol

Value: Symbol.for('nodejs.rejection')

+
captureRejectionSymbol: typeof captureRejectionSymbol

Value: Symbol.for('nodejs.rejection')

See how to write a custom rejection handler.

v13.4.0, v12.16.0

-
CLOSE: "close" = ...
defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single +

CLOSE: "close" = ...
defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single event. This limit can be changed for individual EventEmitter instances using the emitter.setMaxListeners(n) method. To change the default for all EventEmitter instances, the events.defaultMaxListeners property @@ -103,74 +104,77 @@ listeners, respectively. Its name property is set to 'MaxListenersExceededWarning'.

v0.11.2

-
errorMonitor: typeof errorMonitor

This symbol shall be used to install a listener for only monitoring 'error' events. Listeners installed using this symbol are called before the regular 'error' listeners are called.

+
errorMonitor: typeof errorMonitor

This symbol shall be used to install a listener for only monitoring 'error' events. Listeners installed using this symbol are called before the regular 'error' listeners are called.

Installing a listener using this symbol does not change the behavior once an 'error' event is emitted. Therefore, the process will still crash if no regular 'error' listener is installed.

v13.6.0, v12.17.0

-

Accessors

  • get closed(): boolean
  • The cursor is closed and all remaining locally buffered documents have been iterated.

    -

    Returns boolean

  • get id(): undefined | Long
  • The cursor has no id until it receives a response from the initial cursor creating command.

    +

Accessors

  • get closed(): boolean
  • The cursor is closed and all remaining locally buffered documents have been iterated.

    +

    Returns boolean

  • get id(): undefined | Long
  • The cursor has no id until it receives a response from the initial cursor creating command.

    It is non-zero for as long as the database has an open cursor.

    The initiating command may receive a zero id if the entire result is in the firstBatch.

    -

    Returns undefined | Long

  • get killed(): boolean
  • A killCursors command was attempted on this cursor. This is performed if the cursor id is non-zero.

    -

    Returns boolean

Methods

  • Type Parameters

    • K

    Parameters

    • error: Error
    • event: string | symbol
    • Rest...args: AnyRest

    Returns void

Methods

  • Type Parameters

    • K

    Parameters

    • error: Error
    • event: string | symbol
    • Rest...args: AnyRest

    Returns void

  • Add a cursor flag to the cursor

    Parameters

    • flag:
          | "tailable"
          | "oplogReplay"
          | "noCursorTimeout"
          | "awaitData"
          | "exhaust"
          | "partial"

      The flag to set; must be one of the following: 'tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', 'partial'.

    • value: boolean

      The flag boolean value.

      -

    Returns this

  • Alias for emitter.on(eventName, listener).

    +

Returns this

  • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments +

Returns this

  • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

    Returns true if the event had listeners, false otherwise.

    import { EventEmitter } from 'node:events';
    const myEmitter = new EventEmitter();

    // First listener
    myEmitter.on('event', function firstListener() {
    console.log('Helloooo! first listener');
    });
    // Second listener
    myEmitter.on('event', function secondListener(arg1, arg2) {
    console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
    });
    // Third listener
    myEmitter.on('event', function thirdListener(...args) {
    const parameters = args.join(', ');
    console.log(`event with parameters ${parameters} in third listener`);
    });

    console.log(myEmitter.listeners('event'));

    myEmitter.emit('event', 1, 2, 3, 4, 5);

    // Prints:
    // [
    // [Function: firstListener],
    // [Function: secondListener],
    // [Function: thirdListener]
    // ]
    // Helloooo! first listener
    // event with parameters 1, 2 in second listener
    // event with parameters 1, 2, 3, 4, 5 in third listener

    Type Parameters

    • EventKey extends "close"

    Parameters

    Returns boolean

    v0.1.26

    -
  • Returns an array listing the events for which the emitter has registered listeners. The values in the array are strings or Symbols.

    import { EventEmitter } from 'node:events';

    const myEE = new EventEmitter();
    myEE.on('foo', () => {});
    myEE.on('bar', () => {});

    const sym = Symbol('symbol');
    myEE.on(sym, () => {});

    console.log(myEE.eventNames());
    // Prints: [ 'foo', 'bar', Symbol(symbol) ]

    Returns string[]

    v6.0.0

    -
  • Iterates over all the documents for this cursor using the iterator, callback pattern.

    If the iterator returns false, iteration will stop.

    Parameters

    • iterator: ((doc: TSchema) => boolean | void)

      The iteration callback.

        • (doc): boolean | void
        • Parameters

          Returns boolean | void

    Returns Promise<void>

    • Will be removed in a future release. Use for await...of instead.
    -
  • Returns the number of listeners listening for the event named eventName. If listener is provided, it will return how many times the listener is found in the list of the listeners of the event.

    Type Parameters

    • EventKey extends "close"

    Parameters

    Returns number

    v3.2.0

    -
  • Map all documents using the provided function. If there is a transform set on the cursor, that will be called first and the result passed to this function's transform.

    Type Parameters

    • T

    Parameters

    • transform: ((doc: TSchema) => T)

      The mapping transformation method.

      @@ -191,17 +195,17 @@
      const cursor: FindCursor<Document> = coll.find();
      const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
      const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
      -
  • Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)

    Parameters

    • value: number

      Number of milliseconds to wait before aborting the query.

      -

    Returns this

  • Alias for emitter.removeListener().

    +

Returns this

  • Adds the listener function to the end of the listeners array for the event named eventName. No checks are made to see if the listener has already been added. Multiple calls passing the same combination of eventName and listener will result in the listener being added, and called, multiple times.

    @@ -216,7 +220,7 @@

    Returns this

    v0.1.101

    -
  • Adds the listener function to the end of the listeners array for the event +

  • Adds the listener function to the end of the listeners array for the event named eventName. No checks are made to see if the listener has already been added. Multiple calls passing the same combination of eventName and listener will result in the listener being added, and called, multiple times.

    @@ -231,7 +235,7 @@

    Returns this

    v0.1.101

    -
  • Adds the listener function to the end of the listeners array for the event +

  • Adds the listener function to the end of the listeners array for the event named eventName. No checks are made to see if the listener has already been added. Multiple calls passing the same combination of eventName and listener will result in the listener being added, and called, multiple times.

    @@ -246,7 +250,7 @@

    Returns this

    v0.1.101

    -
  • Adds a one-time listener function for the event named eventName. The next time eventName is triggered, this listener is removed and then invoked.

    server.once('connection', (stream) => {
    console.log('Ah, we have our first user!');
    });
    @@ -259,7 +263,7 @@

    Returns this

    v0.3.0

    -
  • Adds a one-time listener function for the event named eventName. The +

  • Adds a one-time listener function for the event named eventName. The next time eventName is triggered, this listener is removed and then invoked.

    server.once('connection', (stream) => {
    console.log('Ah, we have our first user!');
    });
    @@ -272,7 +276,7 @@

    Returns this

    v0.3.0

    -
  • Adds a one-time listener function for the event named eventName. The +

  • Adds a one-time listener function for the event named eventName. The next time eventName is triggered, this listener is removed and then invoked.

    server.once('connection', (stream) => {
    console.log('Ah, we have our first user!');
    });
    @@ -285,8 +289,8 @@

    Returns this

    v0.3.0

    -
  • Add an out stage to the aggregation pipeline

    -

    Parameters

    • $out: string | {
          coll: string;
          db: string;
      }

    Returns this

  • Add an out stage to the aggregation pipeline

    +

    Parameters

    • $out: string | {
          coll: string;
          db: string;
      }

    Returns this
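
A minimal sketch of the $out stage described above; the database and collection names are illustrative only:

    // Write the aggregation output to reporting.activeUsers (example names).
    const cursor = collection
      .aggregate([{ $match: { status: 'active' } }])
      .out({ db: 'reporting', coll: 'activeUsers' });
    await cursor.toArray(); // iterating the cursor runs the pipeline, including $out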

  • Add a project stage to the aggregation pipeline

    Type Parameters

    Parameters

    Returns AggregationCursor<T>

    In order to strictly type this function you must provide an interface that represents the effect of your projection on the result documents.

    By default chaining a projection to your cursor changes the returned type to the generic Document type. @@ -351,21 +355,21 @@

    const cursor: AggregationCursor<{ a: number; b: string }> = coll.aggregate([]);
    const projectCursor = cursor.project<{ a: number }>({ _id: 0, a: true });
    const aPropOnlyArray: {a: number}[] = await projectCursor.toArray();

    // or always use chaining and save the final cursor

    const cursor = coll.aggregate().project<{ a: string }>({
    _id: 0,
    a: { $convert: { input: '$a', to: 'string' }
    }});
    -
  • Returns a copy of the array of listeners for the event named eventName, including any wrappers (such as those created by .once()).

    import { EventEmitter } from 'node:events';
    const emitter = new EventEmitter();
    emitter.once('log', () => console.log('log once'));

    // Returns a new Array with a function `onceWrapper` which has a property
    // `listener` which contains the original listener bound above
    const listeners = emitter.rawListeners('log');
    const logFnWrapper = listeners[0];

    // Logs "log once" to the console and does not unbind the `once` event
    logFnWrapper.listener();

    // Logs "log once" to the console and removes the listener
    logFnWrapper();

    emitter.on('log', () => console.log('log persistently'));
    // Will return a new Array with a single function bound by `.on()` above
    const newListeners = emitter.rawListeners('log');

    // Logs "log persistently" twice
    newListeners[0]();
    emitter.emit('log');

    Type Parameters

    • EventKey extends "close"

    Parameters

    Returns AbstractCursorEvents[EventKey][]

    v9.4.0

    -
  • Removes all listeners, or those of the specified eventName.

    It is bad practice to remove listeners added elsewhere in the code, particularly when the EventEmitter instance was created by some other component or module (e.g. sockets or file streams).

    Returns a reference to the EventEmitter, so that calls can be chained.

    Type Parameters

    • EventKey extends "close"

    Parameters

    • Optionalevent: string | symbol | EventKey

    Returns this

    v0.1.26

    -
  • Rewind this cursor to its uninitialized state. Any options that are present on the cursor will remain in effect. Iterating this cursor will cause new queries to be sent to the server, even if the resultant data has already been retrieved by this cursor.

    -

    Returns void

  • By default EventEmitters will print a warning if more than 10 listeners are added for a particular event. This is a useful default that helps finding memory leaks. The emitter.setMaxListeners() method allows the limit to be modified for this specific EventEmitter instance. The value can be set to Infinity (or 0) to indicate an unlimited number of listeners.

    Returns a reference to the EventEmitter, so that calls can be chained.

    Parameters

    • n: number

    Returns this

    v0.3.5

    -
  • Returns an array of documents. The caller is responsible for making sure that there is enough memory to store the results. Note that the array only contains partial results when this cursor had been previously accessed. In that case, cursor.rewind() can be used to reset the cursor.

    -

    Returns Promise<TSchema[]>

  • Experimental

    Listens once to the abort event on the provided signal.

    +

Returns this

  • Returns a copy of the array of listeners for the event named eventName.

    +
  • Returns the currently set max amount of listeners.

    +
  • A class method that returns the number of listeners for the given eventName registered on the given emitter.

    +
  • A class method that returns the number of listeners for the given eventName registered on the given emitter.

    import { EventEmitter, listenerCount } from 'node:events';

    const myEmitter = new EventEmitter();
    myEmitter.on('event', () => {});
    myEmitter.on('event', () => {});
    console.log(listenerCount(myEmitter, 'event'));
    // Prints: 2
    @@ -508,7 +512,7 @@
  • eventName: string | symbol

    The event name

Returns number

v0.9.12

Since v3.2.0 - Use listenerCount instead.

-
  • import { on, EventEmitter } from 'node:events';
    import process from 'node:process';

    const ee = new EventEmitter();

    // Emit later on
    process.nextTick(() => {
    ee.emit('foo', 'bar');
    ee.emit('foo', 42);
    });

    for await (const event of on(ee, 'foo')) {
    // The execution of this inner block is synchronous and it
    // processes one event at a time (even with await). Do not use
    // if concurrent execution is required.
    console.log(event); // prints ['bar'] [42]
    }
    // Unreachable here +
  • import { on, EventEmitter } from 'node:events';
    import process from 'node:process';

    const ee = new EventEmitter();

    // Emit later on
    process.nextTick(() => {
    ee.emit('foo', 'bar');
    ee.emit('foo', 42);
    });

    for await (const event of on(ee, 'foo')) {
    // The execution of this inner block is synchronous and it
    // processes one event at a time (even with await). Do not use
    // if concurrent execution is required.
    console.log(event); // prints ['bar'] [42]
    }
    // Unreachable here

    Returns an AsyncIterator that iterates eventName events. It will throw @@ -525,7 +529,7 @@

    Parameters

    • emitter: EventEmitter<DefaultEventMap>
    • eventName: string | symbol
    • Optionaloptions: StaticEventEmitterIteratorOptions

    Returns AsyncIterableIterator<any[]>

    An AsyncIterator that iterates eventName events emitted by the emitter

    v13.6.0, v12.16.0

    -
  • Parameters

    • emitter: EventTarget
    • eventName: string
    • Optionaloptions: StaticEventEmitterIteratorOptions

    Returns AsyncIterableIterator<any[]>

  • Creates a Promise that is fulfilled when the EventEmitter emits the given +

  • Parameters

    • emitter: EventTarget
    • eventName: string
    • Optionaloptions: StaticEventEmitterIteratorOptions

    Returns AsyncIterableIterator<any[]>

  • Creates a Promise that is fulfilled when the EventEmitter emits the given event or that is rejected if the EventEmitter emits 'error' while waiting. The Promise will resolve with an array of all the arguments emitted to the given event.

    @@ -545,9 +549,9 @@

    Parameters

    • emitter: EventEmitter<DefaultEventMap>
    • eventName: string | symbol
    • Optionaloptions: StaticEventEmitterOptions

    Returns Promise<any[]>

    v11.13.0, v10.16.0

    -
  • Parameters

    • emitter: EventTarget
    • eventName: string
    • Optionaloptions: StaticEventEmitterOptions

    Returns Promise<any[]>

  • import { setMaxListeners, EventEmitter } from 'node:events';

    const target = new EventTarget();
    const emitter = new EventEmitter();

    setMaxListeners(5, target, emitter); +
  • Parameters

    • emitter: EventTarget
    • eventName: string
    • Optionaloptions: StaticEventEmitterOptions

    Returns Promise<any[]>

  • import { setMaxListeners, EventEmitter } from 'node:events';

    const target = new EventTarget();
    const emitter = new EventEmitter();

    setMaxListeners(5, target, emitter);

    Parameters

    • Optionaln: number

      A non-negative number. The maximum number of listeners per EventTarget event.

    • Rest...eventTargets: (EventEmitter<DefaultEventMap> | EventTarget)[]

    Returns void

    v15.4.0

    -
+
diff --git a/docs/Next/classes/Batch.html b/docs/Next/classes/Batch.html index b839c28b797..380121052aa 100644 --- a/docs/Next/classes/Batch.html +++ b/docs/Next/classes/Batch.html @@ -1,6 +1,6 @@ Batch | mongodb

Class Batch<T>

Keeps the state of an unordered batch so we can rewrite the results correctly after command execution

-

Type Parameters

Constructors

Type Parameters

Constructors

Properties

batchType: BatchType
currentIndex: number
operations: T[]
originalIndexes: number[]
originalZeroIndex: number
size: number
sizeBytes: number
+

Constructors

Properties

batchType: BatchType
currentIndex: number
operations: T[]
originalIndexes: number[]
originalZeroIndex: number
size: number
sizeBytes: number
diff --git a/docs/Next/classes/BulkOperationBase.html b/docs/Next/classes/BulkOperationBase.html index 175d0672d2c..d1486059b1f 100644 --- a/docs/Next/classes/BulkOperationBase.html +++ b/docs/Next/classes/BulkOperationBase.html @@ -1,4 +1,4 @@ -BulkOperationBase | mongodb

Class BulkOperationBase Abstract

Hierarchy (view full)

Properties

isOrdered +BulkOperationBase | mongodb

Class BulkOperationBase Abstract

Hierarchy (view full)

Properties

Accessors

batches bsonOptions @@ -9,14 +9,14 @@ find insert raw -

Properties

isOrdered: boolean
operationId?: number

Accessors

Methods

  • Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne. +

Properties

isOrdered: boolean
operationId?: number

Accessors

Methods

  • Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne. Returns a builder object used to complete the definition of the operation.

    Parameters

    Returns FindOperators

    const bulkOp = collection.initializeOrderedBulkOp();

    // Add an updateOne to the bulkOp
    bulkOp.find({ a: 1 }).updateOne({ $set: { b: 2 } });

    // Add an updateMany to the bulkOp
    bulkOp.find({ c: 3 }).update({ $set: { d: 4 } });

    // Add an upsert
    bulkOp.find({ e: 5 }).upsert().updateOne({ $set: { f: 6 } });

    // Add a deletion
    bulkOp.find({ g: 7 }).deleteOne();

    // Add a multi deletion
    bulkOp.find({ h: 8 }).delete();

    // Add a replaceOne
    bulkOp.find({ i: 9 }).replaceOne({ j: 10 });

    // Update using a pipeline (requires MongoDB 4.2 or higher)
    bulkOp.find({ k: 11, y: { $exists: true }, z: { $exists: true } }).updateOne([
    { $set: { total: { $sum: [ '$y', '$z' ] } } }
    ]);

    // All of the ops will now be executed
    await bulkOp.execute();
    -
  • Add a single insert document to the bulk operation

    Parameters

    Returns BulkOperationBase

    const bulkOp = collection.initializeOrderedBulkOp();

    // Adds three inserts to the bulkOp.
    bulkOp
    .insert({ a: 1 })
    .insert({ b: 2 })
    .insert({ c: 3 });
    await bulkOp.execute();
    -
+
diff --git a/docs/Next/classes/BulkWriteResult.html b/docs/Next/classes/BulkWriteResult.html index cb664d3487d..1ba3638d474 100644 --- a/docs/Next/classes/BulkWriteResult.html +++ b/docs/Next/classes/BulkWriteResult.html @@ -1,5 +1,5 @@ BulkWriteResult | mongodb

Class BulkWriteResult

The result of a bulk write.

-

Properties

Properties

deletedCount: number

Number of documents deleted.

-
insertedCount: number

Number of documents inserted.

-
insertedIds: {
    [key: number]: any;
}

Inserted document generated IDs; the hash key is the index of the originating operation

-
matchedCount: number

Number of documents matched for update.

-
modifiedCount: number

Number of documents modified.

-
upsertedCount: number

Number of documents upserted.

-
upsertedIds: {
    [key: number]: any;
}

Upserted document generated IDs; the hash key is the index of the originating operation

-

Accessors

  • get ok(): number
  • Evaluates to true if the bulk operation correctly executes

    -

    Returns number

Methods

  • Returns the number of write errors from the bulk operation

    -

    Returns number

  • Returns true if the bulk operation contains a write error

    -

    Returns boolean

+
insertedCount: number

Number of documents inserted.

+
insertedIds: {
    [key: number]: any;
}

Inserted document generated IDs; the hash key is the index of the originating operation

+
matchedCount: number

Number of documents matched for update.

+
modifiedCount: number

Number of documents modified.

+
upsertedCount: number

Number of documents upserted.

+
upsertedIds: {
    [key: number]: any;
}

Upserted document generated IDs; the hash key is the index of the originating operation

+

Accessors

  • get ok(): number
  • Evaluates to true if the bulk operation correctly executes

    +

    Returns number

Methods

  • Returns the number of write errors from the bulk operation

    +

    Returns number

  • Returns true if the bulk operation contains a write error

    +

    Returns boolean
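
To tie the properties and methods above together, a hedged sketch of inspecting a BulkWriteResult; the collection handle and documents are examples:

    const bulkOp = collection.initializeUnorderedBulkOp();
    bulkOp.insert({ a: 1 });
    bulkOp.find({ b: 2 }).upsert().updateOne({ $set: { b: 3 } });
    const result = await bulkOp.execute();

    console.log(result.ok);            // 1 when the batch executed correctly
    console.log(result.insertedCount, result.upsertedIds);
    if (result.hasWriteErrors()) {
      console.log(result.getWriteErrorCount());
    }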

diff --git a/docs/Next/classes/ChangeStream.html b/docs/Next/classes/ChangeStream.html index 82e23330c98..fea6c11f9d7 100644 --- a/docs/Next/classes/ChangeStream.html +++ b/docs/Next/classes/ChangeStream.html @@ -1,5 +1,5 @@ ChangeStream | mongodb

Class ChangeStream<TSchema, TChange>

Creates a new Change Stream instance. Normally created using Collection.watch().
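
For context, a small sketch of creating a change stream through Collection.watch(); the collection handle and the pipeline are assumptions, and change streams require a replica set or sharded deployment:

    const changeStream = collection.watch([{ $match: { operationType: 'insert' } }]);

    changeStream.on('change', change => {
      console.log('insert seen:', change.documentKey);
    });

    // Later, release the server-side resources.
    await changeStream.close();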

-

Type Parameters

Hierarchy (view full)

Implements

Properties

Type Parameters

Hierarchy (view full)

Implements

Properties

[asyncDispose]: (() => Promise<void>)

An alias for ChangeStream.close().

-
namespace: MongoDBNamespace
options: ChangeStreamOptions & {
    writeConcern?: undefined;
}

WriteConcern can still be present on the options because +

namespace: MongoDBNamespace
options: ChangeStreamOptions & {
    writeConcern?: undefined;
}

WriteConcern can still be present on the options because we inherit options from the client/db/collection. The key must be present on the options in order to delete it. This allows typescript to delete the key but will not allow a writeConcern to be assigned as a property on options.

-
pipeline: Document[]
streamOptions?: CursorStreamOptions
type: symbol
captureRejections: boolean

Value: boolean

+
pipeline: Document[]
streamOptions?: CursorStreamOptions
type: symbol
captureRejections: boolean

Value: boolean

Change the default captureRejections option on all new EventEmitter objects.

v13.4.0, v12.16.0

captureRejectionSymbol: typeof captureRejectionSymbol

Value: Symbol.for('nodejs.rejection')

@@ -64,7 +64,7 @@
CHANGE: "change" = CHANGE

Fired for each new matching change in the specified namespace. Attaching a change event listener to a Change Stream will switch the stream into flowing mode. Data will then be passed as soon as it is available.

-
CLOSE: "close" = CLOSE
defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single +

CLOSE: "close" = CLOSE
defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single event. This limit can be changed for individual EventEmitter instances using the emitter.setMaxListeners(n) method. To change the default for all EventEmitter instances, the events.defaultMaxListeners property @@ -89,27 +89,27 @@ listeners, respectively. Its name property is set to 'MaxListenersExceededWarning'.

v0.11.2

-
END: "end" = END
ERROR: "error" = ERROR
errorMonitor: typeof errorMonitor

This symbol shall be used to install a listener for only monitoring 'error' events. Listeners installed using this symbol are called before the regular 'error' listeners are called.

+
END: "end" = END
ERROR: "error" = ERROR
errorMonitor: typeof errorMonitor

This symbol shall be used to install a listener for only monitoring 'error' events. Listeners installed using this symbol are called before the regular 'error' listeners are called.

Installing a listener using this symbol does not change the behavior once an 'error' event is emitted. Therefore, the process will still crash if no regular 'error' listener is installed.

v13.6.0, v12.17.0

-
INIT: "init" = INIT
MORE: "more" = MORE
RESPONSE: "response" = RESPONSE
RESUME_TOKEN_CHANGED: "resumeTokenChanged" = RESUME_TOKEN_CHANGED

Emitted each time the change stream stores a new resume token.

-

Accessors

  • get resumeToken(): unknown
  • The cached resume token that is used to resume after the most recently returned change.

    -

    Returns unknown

Methods

  • Type Parameters

    • K

    Parameters

    • error: Error
    • event: string | symbol
    • Rest...args: AnyRest

    Returns void

  • Alias for emitter.on(eventName, listener).

    -

    Type Parameters

    • EventKey extends
          | "error"
          | "close"
          | "response"
          | "more"
          | "init"
          | "change"
          | "end"
          | "resumeTokenChanged"

    Parameters

    Returns this

    v0.1.26

    +
INIT: "init" = INIT
MORE: "more" = MORE
RESPONSE: "response" = RESPONSE
RESUME_TOKEN_CHANGED: "resumeTokenChanged" = RESUME_TOKEN_CHANGED

Emitted each time the change stream stores a new resume token.

+

Accessors

  • get resumeToken(): unknown
  • The cached resume token that is used to resume after the most recently returned change.

    +

    Returns unknown

Methods

  • Type Parameters

    • K

    Parameters

    • error: Error
    • event: string | symbol
    • Rest...args: AnyRest

    Returns void

  • Frees the internal resources used by the change stream.

    -

    Returns Promise<void>

  • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments +

    Returns Promise<void>

  • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

    Returns true if the event had listeners, false otherwise.

    import { EventEmitter } from 'node:events';
    const myEmitter = new EventEmitter();

    // First listener
    myEmitter.on('event', function firstListener() {
    console.log('Helloooo! first listener');
    });
    // Second listener
    myEmitter.on('event', function secondListener(arg1, arg2) {
    console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
    });
    // Third listener
    myEmitter.on('event', function thirdListener(...args) {
    const parameters = args.join(', ');
    console.log(`event with parameters ${parameters} in third listener`);
    });

    console.log(myEmitter.listeners('event'));

    myEmitter.emit('event', 1, 2, 3, 4, 5);

    // Prints:
    // [
    // [Function: firstListener],
    // [Function: secondListener],
    // [Function: thirdListener]
    // ]
    // Helloooo! first listener
    // event with parameters 1, 2 in second listener
    // event with parameters 1, 2, 3, 4, 5 in third listener
    -

    Type Parameters

    • EventKey extends
          | "error"
          | "close"
          | "response"
          | "more"
          | "init"
          | "change"
          | "end"
          | "resumeTokenChanged"

    Parameters

    Returns boolean

    v0.1.26

    +

    Type Parameters

    Parameters

    Returns boolean

    v0.1.26

  • Returns an array listing the events for which the emitter has registered listeners. The values in the array are strings or Symbols.

    import { EventEmitter } from 'node:events';

    const myEE = new EventEmitter();
    myEE.on('foo', () => {});
    myEE.on('bar', () => {});

    const sym = Symbol('symbol');
    myEE.on(sym, () => {});

    console.log(myEE.eventNames());
    // Prints: [ 'foo', 'bar', Symbol(symbol) ] @@ -120,18 +120,18 @@ set by emitter.setMaxListeners(n) or defaults to defaultMaxListeners.

    Returns number

    v1.0.0

  • Check if there is any document still available in the Change Stream

    -

    Returns Promise<boolean>

  • Returns the number of listeners listening for the event named eventName. +

    Returns Promise<boolean>

  • Returns the number of listeners listening for the event named eventName. If listener is provided, it will return how many times the listener is found in the list of the listeners of the event.

    -

    Type Parameters

    • EventKey extends
          | "error"
          | "close"
          | "response"
          | "more"
          | "init"
          | "change"
          | "end"
          | "resumeTokenChanged"

    Parameters

    Returns number

    v3.2.0

    +

    Type Parameters

    Parameters

    Returns number

    v3.2.0

  • Alias for emitter.removeListener().

    +

    Type Parameters

    Parameters

    Returns this

    v10.0.0

  • Alias for emitter.removeListener().

    Parameters

    Returns this

    v10.0.0

  • Alias for emitter.removeListener().

    @@ -149,7 +149,7 @@
    import { EventEmitter } from 'node:events';
    const myEE = new EventEmitter();
    myEE.on('foo', () => console.log('a'));
    myEE.prependListener('foo', () => console.log('b'));
    myEE.emit('foo');
    // Prints:
    // b
    // a
    -

    Type Parameters

    • EventKey extends
          | "error"
          | "close"
          | "response"
          | "more"
          | "init"
          | "change"
          | "end"
          | "resumeTokenChanged"

    Parameters

    • event: EventKey
    • listener: ChangeStreamEvents<TSchema, TChange>[EventKey]

      The callback function

      +

      Type Parameters

      Parameters

      Returns this

      v0.1.101

    • Adds the listener function to the end of the listeners array for the event named eventName. No checks are made to see if the listener has already @@ -192,7 +192,7 @@

      import { EventEmitter } from 'node:events';
      const myEE = new EventEmitter();
      myEE.once('foo', () => console.log('a'));
      myEE.prependOnceListener('foo', () => console.log('b'));
      myEE.emit('foo');
      // Prints:
      // b
      // a
      -

      Type Parameters

      • EventKey extends
            | "error"
            | "close"
            | "response"
            | "more"
            | "init"
            | "change"
            | "end"
            | "resumeTokenChanged"

      Parameters

      • event: EventKey
      • listener: ChangeStreamEvents<TSchema, TChange>[EventKey]

        The callback function

        +

        Type Parameters

        Parameters

        Returns this

        v0.3.0

      • Adds a one-time listener function for the event named eventName. The next time eventName is triggered, this listener is removed and then invoked.

        @@ -228,7 +228,7 @@

        Returns a reference to the EventEmitter, so that calls can be chained.

        -

        Type Parameters

        • EventKey extends
              | "error"
              | "close"
              | "response"
              | "more"
              | "init"
              | "change"
              | "end"
              | "resumeTokenChanged"

        Parameters

        • event: EventKey
        • listener: ChangeStreamEvents<TSchema, TChange>[EventKey]

          The callback function

          +

          Type Parameters

          Parameters

          Returns this

          v6.0.0

        • Adds the listener function to the beginning of the listeners array for the event named eventName. No checks are made to see if the listener has @@ -256,7 +256,7 @@

          Returns a reference to the EventEmitter, so that calls can be chained.

          -

          Type Parameters

          • EventKey extends
                | "error"
                | "close"
                | "response"
                | "more"
                | "init"
                | "change"
                | "end"
                | "resumeTokenChanged"

          Parameters

          • event: EventKey
          • listener: ChangeStreamEvents<TSchema, TChange>[EventKey]

            The callback function

            +

            Type Parameters

            Parameters

            Returns this

            v6.0.0

          • Adds a one-time listener function for the event named eventName to the beginning of the listeners array. The next time eventName is triggered, this listener is removed, and then invoked.

            @@ -279,13 +279,13 @@
            import { EventEmitter } from 'node:events';
            const emitter = new EventEmitter();
            emitter.once('log', () => console.log('log once'));

            // Returns a new Array with a function `onceWrapper` which has a property
            // `listener` which contains the original listener bound above
            const listeners = emitter.rawListeners('log');
            const logFnWrapper = listeners[0];

            // Logs "log once" to the console and does not unbind the `once` event
            logFnWrapper.listener();

            // Logs "log once" to the console and removes the listener
            logFnWrapper();

            emitter.on('log', () => console.log('log persistently'));
            // Will return a new Array with a single function bound by `.on()` above
            const newListeners = emitter.rawListeners('log');

            // Logs "log persistently" twice
            newListeners[0]();
            emitter.emit('log');
            -

            Type Parameters

            • EventKey extends
                  | "error"
                  | "close"
                  | "response"
                  | "more"
                  | "init"
                  | "change"
                  | "end"
                  | "resumeTokenChanged"

            Parameters

            Returns ChangeStreamEvents<TSchema, TChange>[EventKey][]

            v9.4.0

            +

            Type Parameters

            Parameters

            Returns ChangeStreamEvents<TSchema, TChange>[EventKey][]

            v9.4.0

  • Removes all listeners, or those of the specified eventName.

    It is bad practice to remove listeners added elsewhere in the code, particularly when the EventEmitter instance was created by some other component or module (e.g. sockets or file streams).

    Returns a reference to the EventEmitter, so that calls can be chained.

    -

    Type Parameters

    • EventKey extends
          | "error"
          | "close"
          | "response"
          | "more"
          | "init"
          | "change"
          | "end"
          | "resumeTokenChanged"

    Parameters

    • Optionalevent: string | symbol | EventKey

    Returns this

    v0.1.26

    +

    Type Parameters

    Parameters

    • Optionalevent: string | symbol | EventKey

    Returns this

    v0.1.26

  • Removes the specified listener from the listener array for the event named eventName.

    const callback = (stream) => {
    console.log('someone connected!');
    };
    server.on('connection', callback);
    // ...
    server.removeListener('connection', callback);
    @@ -312,7 +312,7 @@

    Returns a reference to the EventEmitter, so that calls can be chained.

    -

    Type Parameters

    • EventKey extends
          | "error"
          | "close"
          | "response"
          | "more"
          | "init"
          | "change"
          | "end"
          | "resumeTokenChanged"

    Parameters

    Returns this

    v0.1.26

    +

    Type Parameters

    Parameters

    Returns this

    v0.1.26

  • Removes the specified listener from the listener array for the event named eventName.

    const callback = (stream) => {
    console.log('someone connected!');
    };
    server.on('connection', callback);
    // ...
    server.removeListener('connection', callback);
    @@ -377,8 +377,8 @@

    NOTE: When using a Stream to process change stream events, the stream will NOT automatically resume if a resumable error is encountered.

    Parameters

    Returns Readable & AsyncIterable<TChange>

    MongoChangeStreamError if the underlying cursor or the change stream is closed
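    For illustration, a minimal hedged sketch of consuming the stream as an async iterable (assumes an existing `collection`; per the NOTE above, the caller is responsible for handling resumable errors when using a Stream):

    const changeStream = collection.watch();
    const readable = changeStream.stream();

    for await (const change of readable) {
      // each `change` is a TChange document; the stream does NOT resume automatically on resumable errors
      console.log(change.operationType);
    }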

    -
  • Try to get the next available document from the Change Stream's cursor or null if an empty batch is returned

    -

    Returns Promise<null | TChange>

  • Experimental

    Listens once to the abort event on the provided signal.

    +
  • Try to get the next available document from the Change Stream's cursor or null if an empty batch is returned

    +

    Returns Promise<null | TChange>
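    A brief hedged sketch of polling with tryNext() (assumes an open change stream on an existing `collection`; tryNext() resolves to null when the current batch is empty instead of waiting for the next change):

    const changeStream = collection.watch();

    const change = await changeStream.tryNext();
    if (change == null) {
      // empty batch: no change was available right now
    } else {
      console.log(change.operationType);
    }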

  • Experimental

    Listens once to the abort event on the provided signal.

    Listening to the abort event on abort signals is unsafe and may lead to resource leaks since another third party with the signal can call e.stopImmediatePropagation(). Unfortunately Node.js cannot change

diff --git a/docs/Next/classes/ClientEncryption.html index 20f81d47207..23e70cda2a5 100644 --- a/docs/Next/classes/ClientEncryption.html +++ b/docs/Next/classes/ClientEncryption.html @@ -1,5 +1,5 @@

ClientEncryption | mongodb

    Class ClientEncryption

    The public interface for explicit in-use encryption

    -

    Constructors

    Constructors

    Accessors

    Methods

    addKeyAltName createDataKey @@ -20,7 +20,7 @@
    new ClientEncryption(mongoClient, {
    keyVaultNamespace: 'client.encryption',
    kmsProviders: {
    aws: {
    accessKeyId: AWS_ACCESS_KEY,
    secretAccessKey: AWS_SECRET_KEY
    }
    }
    });
    -

Accessors

Methods

Accessors

Methods

  • Adds a keyAltName to a key identified by the provided _id.

    This method resolves to/returns the old key value (prior to adding the new altKeyName).

    Parameters

    • _id: Binary

      The id of the document to update.

    • keyAltName: string

      a keyAltName to search for a key
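      A minimal sketch, assuming `clientEncryption` is an existing ClientEncryption instance and `keyId` is the Binary _id of a data key already in the key vault:

      // associate the alternate name 'myAltKeyName' with an existing data key
      const previousKeyDoc = await clientEncryption.addKeyAltName(keyId, 'myAltKeyName');
      console.log(previousKeyDoc); // the key document as it looked before the update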

      @@ -29,7 +29,7 @@
  • Creates a data key used for explicit encryption and inserts it into the key vault namespace

    Parameters

    Returns Promise<UUID>

    // Using async/await to create a local key
    const dataKeyId = await clientEncryption.createDataKey('local');
    @@ -39,7 +39,7 @@
    // Using async/await to create an aws key with a keyAltName
    const dataKeyId = await clientEncryption.createDataKey('aws', {
    masterKey: {
    region: 'us-east-1',
    key: 'xxxxxxxxxxxxxx' // CMK ARN here
    },
    keyAltNames: [ 'mySpecialKey' ]
    });
    -
  • Explicitly decrypt a provided encrypted value

    Type Parameters

    • T = any

    Parameters

    • value: Binary

      An encrypted value

    Returns Promise<T>

    a Promise that either resolves with the decrypted value, or rejects with an error

    // Decrypting value with async/await API
    async function decryptMyValue(value) {
    return clientEncryption.decrypt(value);
    }
    -
  • Deletes the key with the provided id from the keyvault, if it exists.

    Parameters

    Returns Promise<DeleteResult>

    // delete a key by _id
    const id = new Binary(); // id is a bson binary subtype 4 object
    const { deletedCount } = await clientEncryption.deleteKey(id);

    if (deletedCount != null && deletedCount > 0) {
    // successful deletion
    }
    -
  • Explicitly encrypt a provided value. Note that either options.keyId or options.keyAltName must be specified. Specifying both options.keyId and options.keyAltName is considered an error.

    Parameters

    Returns Promise<Binary>

    a Promise that either resolves with the encrypted value, or rejects with an error.

    @@ -68,7 +68,7 @@
    // Encryption using a keyAltName
    async function encryptMyData(value) {
    await clientEncryption.createDataKey('local', { keyAltNames: 'mySpecialKey' });
    return clientEncryption.encrypt(value, { keyAltName: 'mySpecialKey', algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic' });
    }
    -
  • Finds a key in the keyvault with the specified _id.

    Returns a promise that either resolves to a DataKey if a document matches the key or null if no documents match the id. The promise rejects with an error if an error is thrown.

    Parameters

    Returns Promise<null | DataKey>

    // getting a key by id
    const id = new Binary(); // id is a bson binary subtype 4 object
    const key = await clientEncryption.getKey(id);
    if (!key) {
    // key is null if there was no matching key
    }
    -
  • Finds a key in the keyvault which has the specified keyAltName.

    Parameters

    • keyAltName: string

      a keyAltName to search for a key

    Returns Promise<null | WithId<DataKey>>

    Returns a promise that either resolves to a DataKey if a document matches the key or null if no documents match the keyAltName. The promise rejects with an error if an error is thrown.

    // get a key by alt name
    const keyAltName = 'keyAltName';
    const key = await clientEncryption.getKeyByAltName(keyAltName);
    if (!key) {
    // key is null if there is no matching key
    }
    -
  • Removes a keyAltName from a key identified by the provided _id.

    This method resolves to/returns the old key value (prior to removing the keyAltName).

    If the removed keyAltName is the last keyAltName for that key, the keyAltNames property is unset from the document.

    Parameters

    • _id: Binary

      The id of the document to update.

      @@ -108,7 +108,7 @@
  • Searches the keyvault for any data keys matching the provided filter. If there are matches, rewrapManyDataKey then attempts to re-wrap the data keys using the provided options.

    +
  • Searches the keyvault for any data keys matching the provided filter. If there are matches, rewrapManyDataKey then attempts to re-wrap the data keys using the provided options.

    If no matches are found, then no bulk write is performed.

    Returns Promise<{
        bulkWriteResult?: BulkWriteResult;
    }>

    // rewrapping all data data keys (using a filter that matches all documents)
    const filter = {};

    const result = await clientEncryption.rewrapManyDataKey(filter);
    if (result.bulkWriteResult != null) {
    // keys were re-wrapped, results will be available in the bulkWrite object.
    }
    @@ -116,4 +116,4 @@
    // attempting to rewrap all data keys with no matches
    const filter = { _id: new Binary() } // assume _id matches no documents in the database
    const result = await clientEncryption.rewrapManyDataKey(filter);

    if (result.bulkWriteResult == null) {
    // no keys matched, `bulkWriteResult` does not exist on the result object
    }
    -
+
diff --git a/docs/Next/classes/ClientSession.html index b20c1d6ceb2..778c017a3b8 100644 --- a/docs/Next/classes/ClientSession.html +++ b/docs/Next/classes/ClientSession.html @@ -1,13 +1,14 @@

ClientSession | mongodb

Class ClientSession

A class representing a client session on the server

NOTE: not meant to be instantiated directly.

-

Hierarchy (view full)

Implements

Properties

Hierarchy (view full)

Implements

Properties

[asyncDispose]: (() => Promise<void>)
clientOptions?: MongoOptions
clusterTime?: ClusterTime
defaultTransactionOptions: TransactionOptions
explicit: boolean
hasEnded: boolean
operationTime?: Timestamp
supports: {
    causalConsistency: boolean;
}
transaction: Transaction
captureRejections: boolean

Value: boolean

+
clientOptions: MongoOptions
clusterTime?: ClusterTime
defaultTransactionOptions: TransactionOptions
explicit: boolean
hasEnded: boolean
operationTime?: Timestamp
supports: {
    causalConsistency: boolean;
}
timeoutMS?: number

Specifies the time an operation in a given ClientSession will run until it throws a timeout error

+
transaction: Transaction
captureRejections: boolean

Value: boolean

Change the default captureRejections option on all new EventEmitter objects.

v13.4.0, v12.16.0

captureRejectionSymbol: typeof captureRejectionSymbol

Value: Symbol.for('nodejs.rejection')

@@ -89,9 +91,10 @@ regular 'error' listener is installed.

v13.6.0, v12.17.0

Accessors

  • get snapshotEnabled(): boolean
  • Whether or not this session is configured for snapshot reads

    -

    Returns boolean

Methods

  • Type Parameters

    • K

    Parameters

    • error: Error
    • event: string | symbol
    • Rest...args: AnyRest

    Returns void

  • Aborts the currently active transaction in this session.

    -

    Returns Promise<void>

  • get snapshotEnabled(): boolean
  • Whether or not this session is configured for snapshot reads

    +

    Returns boolean

Methods

  • Type Parameters

    • K

    Parameters

    • error: Error
    • event: string | symbol
    • Rest...args: AnyRest

    Returns void

  • Aborts the currently active transaction in this session.

    +

    Parameters

    • Optionaloptions: {
          timeoutMS?: number;
      }

      Optional options, can be used to override defaultTimeoutMS.

      +
      • OptionaltimeoutMS?: number

    Returns Promise<void>

  • Advances the clusterTime for a ClientSession to the provided clusterTime of another ClientSession

    Parameters

    • clusterTime: ClusterTime

      the $clusterTime returned by the server from another session in the form of a document containing the BSON.Timestamp clusterTime and signature

      -

    Returns void

  • Advances the operationTime for a ClientSession.

    +

Returns void

  • Advances the operationTime for a ClientSession.

    Parameters

    • operationTime: Timestamp

      the BSON.Timestamp of the operation type it is desired to advance to

      -

    Returns void

  • Commits the currently active transaction in this session.

    -

    Returns Promise<void>

  • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments +

Returns void

  • Commits the currently active transaction in this session.

    +

    Parameters

    • Optionaloptions: {
          timeoutMS?: number;
      }

      Optional options, can be used to override defaultTimeoutMS.

      +
      • OptionaltimeoutMS?: number

    Returns Promise<void>
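    A hedged sketch of a manual transaction that overrides defaultTimeoutMS on commit and abort (assumes an existing `client` and `collection`; the options shape is taken from the signatures above):

    const session = client.startSession();
    session.startTransaction();
    try {
      await collection.insertOne({ status: 'pending' }, { session });
      await session.commitTransaction({ timeoutMS: 5000 });
    } catch (error) {
      await session.abortTransaction({ timeoutMS: 5000 });
      throw error;
    } finally {
      await session.endSession();
    }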

  • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

    Returns true if the event had listeners, false otherwise.

    import { EventEmitter } from 'node:events';
    const myEmitter = new EventEmitter();

    // First listener
    myEmitter.on('event', function firstListener() {
    console.log('Helloooo! first listener');
    });
    // Second listener
    myEmitter.on('event', function secondListener(arg1, arg2) {
    console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
    });
    // Third listener
    myEmitter.on('event', function thirdListener(...args) {
    const parameters = args.join(', ');
    console.log(`event with parameters ${parameters} in third listener`);
    });

    console.log(myEmitter.listeners('event'));

    myEmitter.emit('event', 1, 2, 3, 4, 5);

    // Prints:
    // [
    // [Function: firstListener],
    // [Function: secondListener],
    // [Function: thirdListener]
    // ]
    // Helloooo! first listener
    // event with parameters 1, 2 in second listener
    // event with parameters 1, 2, 3, 4, 5 in third listener

    @@ -113,9 +117,9 @@ the transaction is aborted.

    Does not end the session on the server.

    Parameters

    • Optionaloptions: EndSessionOptions

      Optional settings. Currently reserved for future use

      -

    Returns Promise<void>
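    A short sketch of the usual acquire/release pattern (assumes an existing `client` and `collection`; the options argument is omitted since it is currently reserved):

    const session = client.startSession();
    try {
      const doc = await collection.findOne({}, { session });
      console.log(doc);
    } finally {
      // frees client-side resources; the server-side session is cleaned up separately
      await session.endSession();
    }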

  • Used to determine if this session equals another

    +

Returns Promise<void>

  • Used to determine if this session equals another

    Parameters

    Returns boolean

  • Returns an array listing the events for which the emitter has registered +

Returns boolean

  • Returns an array listing the events for which the emitter has registered listeners. The values in the array are strings or Symbols.

    import { EventEmitter } from 'node:events';

    const myEE = new EventEmitter();
    myEE.on('foo', () => {});
    myEE.on('bar', () => {});

    const sym = Symbol('symbol');
    myEE.on(sym, () => {});

    console.log(myEE.eventNames());
    // Prints: [ 'foo', 'bar', Symbol(symbol) ]
    @@ -125,8 +129,8 @@ set by emitter.setMaxListeners(n) or defaults to defaultMaxListeners.

    Returns number

    v1.0.0

  • Increment the transaction number on the internal ServerSession

    -

    Returns void

  • Returns boolean

    whether this session is currently in a transaction or not

    -
  • Returns the number of listeners listening for the event named eventName. +

    Returns void

  • Returns boolean

    whether this session is currently in a transaction or not

    +
  • Returns the number of listeners listening for the event named eventName. If listener is provided, it will return how many times the listener is found in the list of the listeners of the event.

    Type Parameters

    • EventKey extends "ended"

    Parameters

    Returns number

    v3.2.0

    @@ -383,14 +387,16 @@

Returns void

IMPORTANT: Running operations in parallel is not supported during a transaction. The use of Promise.all, Promise.allSettled, Promise.race, etc to parallelize operations inside a transaction is undefined behaviour.

-
  • This is here to ensure that ClientSession is never serialized to BSON.

    -

    Returns never

  • Starts a transaction and runs a provided function, ensuring the commitTransaction is always attempted when all operations run in the function have completed.
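    A hedged sketch of withTransaction(), which attempts the commit (and retries certain transient errors) for you; it assumes an existing `client` and an `accounts` collection, and runs the operations sequentially rather than with Promise.all, per the note above:

    const session = client.startSession();
    try {
      await session.withTransaction(async session => {
        // operations inside the callback must pass the session and run one after another
        await accounts.updateOne({ _id: 'alice' }, { $inc: { balance: -100 } }, { session });
        await accounts.updateOne({ _id: 'bob' }, { $inc: { balance: 100 } }, { session });
      });
    } finally {
      await session.endSession();
    }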

    +
  • This is here to ensure that ClientSession is never serialized to BSON.

    +

    Returns never

  • Experimental

    Listens once to the abort event on the provided signal.

    +
  • Experimental

    Listens once to the abort event on the provided signal.

    Listening to the abort event on abort signals is unsafe and may lead to resource leaks since another third party with the signal can call e.stopImmediatePropagation(). Unfortunately Node.js cannot change

    @@ -496,4 +502,4 @@

    Parameters

    • Optionaln: number

      A non-negative number. The maximum number of listeners per EventTarget event.

    • Rest...eventTargets: (EventEmitter<DefaultEventMap> | EventTarget)[]

    Returns void

    v15.4.0

    -
+
diff --git a/docs/Next/classes/Collection.html b/docs/Next/classes/Collection.html index 4cb208c4af9..f9b1c6cc87a 100644 --- a/docs/Next/classes/Collection.html +++ b/docs/Next/classes/Collection.html @@ -4,13 +4,14 @@
import { MongoClient } from 'mongodb';

interface Pet {
name: string;
kind: 'dog' | 'cat' | 'fish';
}

const client = new MongoClient('mongodb://localhost:27017');
const pets = client.db().collection<Pet>('pets');

const petCursor = pets.find();

for await (const pet of petCursor) {
console.log(`${pet.name} is a ${pet.kind}!`);
}
-

Type Parameters

Accessors

Type Parameters

Accessors

  • get dbName(): string
  • The name of the database this collection belongs to

    -

    Returns string

  • get namespace(): string
  • The namespace of this collection, in the format ${this.dbName}.${this.collectionName}

    -

    Returns string

  • get readConcern(): undefined | ReadConcern
  • The current readConcern of the collection. If not explicitly defined for +

Accessors

  • get dbName(): string
  • The name of the database this collection belongs to

    +

    Returns string

  • get namespace(): string
  • The namespace of this collection, in the format ${this.dbName}.${this.collectionName}

    +

    Returns string

  • get readConcern(): undefined | ReadConcern
  • The current readConcern of the collection. If not explicitly defined for this collection, will be inherited from the parent DB

    -

    Returns undefined | ReadConcern

  • get readPreference(): undefined | ReadPreference
  • The current readPreference of the collection. If not explicitly defined for this collection, will be inherited from the parent DB

    -

    Returns undefined | ReadPreference

  • get writeConcern(): undefined | WriteConcern
  • The current writeConcern of the collection. If not explicitly defined for this collection, will be inherited from the parent DB

    -

    Returns undefined | WriteConcern

Methods

Methods

  • Perform a bulkWrite operation without a fluent API

    +

Returns AggregationCursor<T>

  • An estimated count of matching documents in the db to a filter.

    +
  • An estimated count of matching documents in the db to a filter.

    NOTE: This method has been deprecated, since it does not provide an accurate count of the documents in a collection. To obtain an accurate count of documents in the collection, use countDocuments. To obtain an estimated count of all documents in the collection, use estimatedDocumentCount.

    Parameters

    Returns Promise<number>

    use countDocuments or estimatedDocumentCount instead
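    For illustration, hedged equivalents using the non-deprecated APIs (assuming an existing `collection`):

    // accurate count of documents matching a filter
    const matching = await collection.countDocuments({ kind: 'dog' });

    // fast, metadata-based estimate of all documents in the collection
    const estimate = await collection.estimatedDocumentCount();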

    -
  • Gets the number of documents matching the filter. +

-
  • Creates an index on the db and collection collection.

    +
  • Creates an index on the db and collection collection.

    Parameters

    Returns Promise<string>

    const collection = client.db('foo').collection('bar');

    await collection.createIndex({ a: 1, b: -1 });

    // Alternate syntax for { c: 1, d: -1 } that ensures order of indexes
    await collection.createIndex([ [c, 1], [d, -1] ]);

    // Equivalent to { e: 1 }
    await collection.createIndex('e');

    // Equivalent to { f: 1, g: 1 }
    await collection.createIndex(['f', 'g'])

    // Equivalent to { h: 1, i: -1 }
    await collection.createIndex([ { h: 1 }, { i: -1 } ]);

    // Equivalent to { j: 1, k: -1, l: 2d }
    await collection.createIndex(['j', ['k', -1], { l: '2d' }])
    -
  • Creates multiple indexes in the collection, this method is only supported for +

  • Creates multiple indexes in the collection, this method is only supported for MongoDB 2.6 or higher. Earlier version of MongoDB will throw a command not supported error.

    Note: Unlike createIndex, this function takes in raw index specifications.

    @@ -136,33 +137,33 @@

Returns Promise<string[]>

const collection = client.db('foo').collection('bar');
await collection.createIndexes([
// Simple index on field fizz
{
key: { fizz: 1 },
}
// wildcard index
{
key: { '$**': 1 }
},
// named index on darmok and jalad
{
key: { darmok: 1, jalad: -1 }
name: 'tanagra'
}
]);
-
  • Creates a single search index for the collection.

    +
  • Creates a single search index for the collection.

    Parameters

    Returns Promise<string>

    A promise that resolves to the name of the new search index.

    Only available when used against a 7.0+ Atlas cluster.
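    A minimal sketch against a 7.0+ Atlas cluster (the index name and dynamic-mapping definition below are illustrative assumptions, not required values):

    const indexName = await collection.createSearchIndex({
      name: 'default',
      definition: { mappings: { dynamic: true } }
    });
    console.log(indexName); // 'default'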

    -
  • Creates multiple search indexes for the current collection.

    +
  • Creates multiple search indexes for the current collection.

    Parameters

    Returns Promise<string[]>

    A promise that resolves to an array of the newly created search index names.

    Only available when used against a 7.0+ Atlas cluster.

    -
  • Delete a document from a collection

    +

Returns Promise<DeleteResult>

  • The distinct command returns a list of distinct values for the given key across a collection.

    +

Returns Promise<DeleteResult>

  • Drop the collection from the database, removing it permanently. New accesses will create a new collection.

    +

Returns Promise<Flatten<WithId<TSchema>[Key]>[]>

  • Type Parameters

    • Key extends string | number | symbol

    Parameters

    Returns Promise<Flatten<WithId<TSchema>[Key]>[]>

  • Type Parameters

    • Key extends string | number | symbol

    Parameters

    Returns Promise<Flatten<WithId<TSchema>[Key]>[]>

  • Parameters

    • key: string

    Returns Promise<any[]>

  • Parameters

    Returns Promise<any[]>

  • Parameters

    Returns Promise<any[]>

    • Drop the collection from the database, removing it permanently. New accesses will create a new collection.

      Parameters

      Returns Promise<boolean>

    • Drops an index from this collection.

      +

    Returns Promise<boolean>

    • Drops all indexes from this collection.

      +

    Returns Promise<Document>

    • Drops all indexes from this collection.

      Parameters

      Returns Promise<boolean>

    • Deletes a search index by index name.

      +

    Returns Promise<boolean>

    • Deletes a search index by index name.

      Parameters

      • name: string

        The name of the search index to be deleted.

      Returns Promise<void>

      Only available when used against a 7.0+ Atlas cluster.

      -
    • Gets an estimate of the count of documents in a collection using collection metadata. +

    • Gets an estimate of the count of documents in a collection using collection metadata. This will always run a count command on all server versions.

      due to an oversight in versions 5.0.0-5.0.8 of MongoDB, the count command, which estimatedDocumentCount uses in its implementation, was not included in v1 of

      @@ -171,66 +172,66 @@ encountering errors.

      Parameters

      Returns Promise<number>

    • Find a document and delete it in one atomic operation. Requires a write lock for the duration of the operation.

      +
    • Find a document and replace it in one atomic operation. Requires a write lock for the duration of the operation.

      +

    Returns Promise<ModifyResult<TSchema>>

  • Parameters

    Returns Promise<null | WithId<TSchema>>

  • Parameters

    Returns Promise<null | WithId<TSchema>>

  • Parameters

    Returns Promise<null | WithId<TSchema>>

    • Find a document and update it in one atomic operation. Requires a write lock for the duration of the operation.

      +

    Returns Promise<ModifyResult<TSchema>>

  • Parameters

    Returns Promise<null | WithId<TSchema>>

  • Parameters

    Returns Promise<null | WithId<TSchema>>

  • Parameters

    Returns Promise<null | WithId<TSchema>>

  • Returns Promise<ModifyResult<TSchema>>

  • Parameters

    Returns Promise<null | WithId<TSchema>>

  • Parameters

    Returns Promise<null | WithId<TSchema>>

  • Parameters

    Returns Promise<null | WithId<TSchema>>

    • Checks if one or more indexes exist on the collection, fails on first non-existing index

      +

    Returns Promise<IndexDescriptionInfo[]>

  • Parameters

    Returns Promise<IndexDescriptionCompact>

  • Parameters

    Returns Promise<IndexDescriptionCompact | IndexDescriptionInfo[]>

  • Parameters

    Returns Promise<IndexDescriptionInfo[]>

    • Checks if one or more indexes exist on the collection, fails on first non-existing index

      Parameters

      • indexes: string | string[]

        One or more index names to check.

        -
      • Optionaloptions: AbstractCursorOptions

        Optional settings for the command

        -

      Returns Promise<boolean>

    Returns Promise<boolean>
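      A short hedged sketch (the index names 'name_1' and 'kind_1' are assumptions for illustration):

      // check a single index, or pass an array to require that all of them exist
      const hasNameIndex = await collection.indexExists('name_1');
      const hasBoth = await collection.indexExists(['name_1', 'kind_1']);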

    • Initiate an In order bulk write operation. Operations will be serially executed in the order they are added, creating a new operation for each switch in types.

      +

    Returns Promise<IndexDescriptionInfo[]>

  • Parameters

    Returns Promise<IndexDescriptionCompact>

  • Parameters

    Returns Promise<IndexDescriptionCompact | IndexDescriptionInfo[]>

  • Returns Promise<IndexDescriptionCompact>

    • Initiate an In order bulk write operation. Operations will be serially executed in the order they are added, creating a new operation for each switch in types.

      Parameters

      Returns OrderedBulkOperation

      MongoNotConnectedError

      NOTE: MongoClient must be connected prior to calling this method due to a known limitation in this legacy implementation. However, collection.bulkWrite() provides an equivalent API that does not require prior connecting.
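      A hedged sketch of the legacy ordered bulk API (assumes a MongoClient that is already connected, per the NOTE above; collection.bulkWrite() is the usual alternative):

      const bulk = collection.initializeOrderedBulkOp();
      bulk.insert({ name: 'spot', kind: 'dog' });
      bulk.find({ name: 'nemo' }).updateOne({ $set: { kind: 'fish' } });
      bulk.find({ name: 'ghost' }).deleteOne();
      const result = await bulk.execute();
      console.log(result.insertedCount, result.modifiedCount, result.deletedCount);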

      -
    • Initiate an Out of order batch write operation. All operations will be buffered into insert/update/remove commands executed out of order.

      +
    • Initiate an Out of order batch write operation. All operations will be buffered into insert/update/remove commands executed out of order.

      Parameters

      Returns UnorderedBulkOperation

      MongoNotConnectedError

      NOTE: MongoClient must be connected prior to calling this method due to a known limitation in this legacy implementation. However, collection.bulkWrite() provides an equivalent API that does not require prior connecting.

      -
    • Inserts a single document into MongoDB. If documents passed in do not contain the _id field, +

    Returns Promise<InsertManyResult<TSchema>>

    • Returns if the collection is a capped collection

      +

    Returns Promise<InsertOneResult<TSchema>>

    • Returns if the collection is a capped collection

      Parameters

      Returns Promise<boolean>

    Returns Promise<boolean>

    Returns Promise<Document>

    • Update multiple documents in a collection

      +

    Returns Promise<Document | UpdateResult<TSchema>>

    • Update a single document in a collection

      +

    Returns Promise<UpdateResult<TSchema>>

    • Updates a search index by replacing the existing index definition with the provided definition.

      +

    Returns Promise<UpdateResult<TSchema>>

    • Updates a search index by replacing the existing index definition with the provided definition.

      Parameters

      • name: string

        The name of the search index to update.

      • definition: Document

        The new search index definition.

      Returns Promise<void>

      Only available when used against a 7.0+ Atlas cluster.

      -
    +

    In iterator mode, if a next() call throws a timeout error, it will attempt to resume the change stream. The next call can just be retried after this succeeds.

    +
    const changeStream = collection.watch([], { timeoutMS: 100 });
    try {
    await changeStream.next();
    } catch (e) {
    if (e instanceof MongoOperationTimeoutError && !changeStream.closed) {
    await changeStream.next();
    }
    throw e;
    } +
    + +

    In emitter mode, if the change stream goes timeoutMS without emitting a change event, it will emit an error event that returns a MongoOperationTimeoutError, but will not close the change stream unless the resume attempt fails. There is no need to re-establish change listeners as this will automatically continue emitting change events once the resume attempt completes.

    +
    const changeStream = collection.watch([], { timeoutMS: 100 });
    changeStream.on('change', console.log);
    changeStream.on('error', e => {
    if (e instanceof MongoOperationTimeoutError && !changeStream.closed) {
    // do nothing
    } else {
    changeStream.close();
    }
    }); +
    + +
    diff --git a/docs/Next/classes/Db.html b/docs/Next/classes/Db.html index 00c312b9944..c1c9393c437 100644 --- a/docs/Next/classes/Db.html +++ b/docs/Next/classes/Db.html @@ -2,7 +2,7 @@
    import { MongoClient } from 'mongodb';

    interface Pet {
    name: string;
    kind: 'dog' | 'cat' | 'fish';
    }

    const client = new MongoClient('mongodb://localhost:27017');
    const db = client.db();

    // Create a collection that validates our union
    await db.createCollection<Pet>('pets', {
    validator: { $expr: { $in: ['$kind', ['dog', 'cat', 'fish']] } }
    })
    -

    Constructors

    Constructors

    Properties

    Methods

    admin aggregate

    @@ -40,20 +41,20 @@

    Parameters

    • client: MongoClient

      The MongoClient for the database.

    • databaseName: string

      The name of the database this instance represents.

    • Optionaloptions: DbOptions

      Optional settings for Db construction.

      -

    Returns Db

    Properties

    SYSTEM_COMMAND_COLLECTION: string = CONSTANTS.SYSTEM_COMMAND_COLLECTION
    SYSTEM_INDEX_COLLECTION: string = CONSTANTS.SYSTEM_INDEX_COLLECTION
    SYSTEM_JS_COLLECTION: string = CONSTANTS.SYSTEM_JS_COLLECTION
    SYSTEM_NAMESPACE_COLLECTION: string = CONSTANTS.SYSTEM_NAMESPACE_COLLECTION
    SYSTEM_PROFILE_COLLECTION: string = CONSTANTS.SYSTEM_PROFILE_COLLECTION
    SYSTEM_USER_COLLECTION: string = CONSTANTS.SYSTEM_USER_COLLECTION

    Accessors

    • get readPreference(): ReadPreference
    • The current readPreference of the Db. If not explicitly defined for +

    Returns Db

    Properties

    SYSTEM_COMMAND_COLLECTION: string = CONSTANTS.SYSTEM_COMMAND_COLLECTION
    SYSTEM_INDEX_COLLECTION: string = CONSTANTS.SYSTEM_INDEX_COLLECTION
    SYSTEM_JS_COLLECTION: string = CONSTANTS.SYSTEM_JS_COLLECTION
    SYSTEM_NAMESPACE_COLLECTION: string = CONSTANTS.SYSTEM_NAMESPACE_COLLECTION
    SYSTEM_PROFILE_COLLECTION: string = CONSTANTS.SYSTEM_PROFILE_COLLECTION
    SYSTEM_USER_COLLECTION: string = CONSTANTS.SYSTEM_USER_COLLECTION

    Accessors

    • get readPreference(): ReadPreference
    • The current readPreference of the Db. If not explicitly defined for this Db, will be inherited from the parent MongoClient

      -

      Returns ReadPreference

    • get secondaryOk(): boolean
    • Check if a secondary can be used (because the read preference is not set to primary)

      -

      Returns boolean

    Methods

    • get secondaryOk(): boolean
    • Check if a secondary can be used (because the read preference is not set to primary)

      +

      Returns boolean

    • get timeoutMS(): undefined | number
    • Returns undefined | number

    Methods

    • Returns a reference to a MongoDB Collection. If it does not exist it will be created implicitly.

      +

    Returns AggregationCursor<T>

    • Returns a reference to a MongoDB Collection. If it does not exist it will be created implicitly.

      Collection namespace validation is performed server-side.

      Type Parameters

      Parameters

      Returns Collection<TSchema>

      return the new Collection instance

      -
    • Execute a command

      +

    Returns Promise<Collection<Document>[]>

    • Execute a command

      Parameters

      Returns Promise<Document>

      This command does not inherit options from the MongoClient.

      @@ -75,37 +76,37 @@
    • writeConcern - sourced from writeConcern set on the TransactionOptions

    Attaching any of the above fields to the command will have no effect as the driver will overwrite the value.

    -
    • Create a new collection on a server with the specified options. Use this to create capped collections. +

    • Creates an index on the db and collection.

      +

    Returns Promise<Collection<TSchema>>

    • Creates an index on the db and collection.

      Parameters

      • name: string

        Name of the collection to create the index on.

      • indexSpec: IndexSpecification

        Specify the field to index, or an index specification

      • Optionaloptions: CreateIndexesOptions

        Optional settings for the command

        -

      Returns Promise<string>

    • Drop a collection from the database, removing it permanently. New accesses will create a new collection.

      +

    Returns Promise<string>

    • Drop a collection from the database, removing it permanently. New accesses will create a new collection.

      Parameters

      • name: string

        Name of collection to drop

      • Optionaloptions: DropCollectionOptions

        Optional settings for the command

        -

      Returns Promise<boolean>

    • Drop a database, removing it permanently from the server.

      +

    Returns Promise<boolean>

    • Drop a database, removing it permanently from the server.

      Parameters

      Returns Promise<boolean>

    • Retrieves this collections index info.

      +

    Returns Promise<boolean>

    Returns Promise<IndexDescriptionInfo[]>

  • Parameters

    Returns Promise<IndexDescriptionCompact>

  • Parameters

    Returns Promise<IndexDescriptionCompact | IndexDescriptionInfo[]>

  • Parameters

    • name: string

    Returns Promise<IndexDescriptionCompact>

    • Retrieve the current profiling Level for MongoDB

      +

    Returns ListCollectionsCursor<Pick<CollectionInfo, "type" | "name">>

  • Parameters

    Returns ListCollectionsCursor<CollectionInfo>

  • Type Parameters

    Parameters

    Returns ListCollectionsCursor<T>

    • Retrieve the current profiling Level for MongoDB

      Parameters

      Returns Promise<string>

    • Remove a user from a database

      +

    Returns Promise<string>

    • Remove a user from a database

      Parameters

      Returns Promise<boolean>

    Returns Promise<boolean>

    • Rename a collection.

      Type Parameters

      Parameters

      • fromCollection: string

        Name of current collection to rename

      • toCollection: string

        New name of of the collection

      • Optionaloptions: RenameOptions

        Optional settings for the command

      Returns Promise<Collection<TSchema>>

      This operation does not inherit options from the MongoClient.
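      A brief hedged sketch (assumes the source collection exists; dropTarget is shown explicitly as an illustrative choice):

      // rename 'pets' to 'animals', failing if 'animals' already exists
      const animals = await db.renameCollection('pets', 'animals', { dropTarget: false });
      console.log(animals.collectionName); // 'animals'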

      -
    • A low level cursor API providing basic driver functionality:

      • ClientSession management
      • ReadPreference for server selection
      • @@ -113,12 +114,12 @@

      Parameters

      • command: Document

        The command that will start a cursor on the server.

      • Optionaloptions: RunCursorCommandOptions

        Configurations for running the command, bson options will apply to getMores

        -

      Returns RunCommandCursor
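      A hedged sketch that starts a cursor from a raw command (the `find` command and the 'pets' collection name are illustrative assumptions; getMores are issued automatically as the cursor is iterated):

      const cursor = db.runCursorCommand({ find: 'pets', filter: { kind: 'dog' }, batchSize: 2 });
      for await (const doc of cursor) {
        console.log(doc.name);
      }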

    • Set the current profiling level of MongoDB

      +

    Returns RunCommandCursor

    • Get all the db statistics.

      +

    Returns Promise<ProfilingLevel>

    Returns Promise<Document>

    • Create a new Change Stream, watching for new changes (insertions, updates, replacements, deletions, and invalidations) in this database. Will ignore all changes to system collections.

      Type Parameters

      • TSchema extends Document = Document

        Type of the data being detected by the change stream

        @@ -130,4 +131,16 @@
      • The first is to provide the schema that may be defined for all the collections within this database
      • The second is to override the shape of the change stream document entirely, if it is not provided the type will default to ChangeStreamDocument of the first argument
      -
    +

    In iterator mode, if a next() call throws a timeout error, it will attempt to resume the change stream. The next call can just be retried after this succeeds.

    +
    const changeStream = collection.watch([], { timeoutMS: 100 });
    try {
    await changeStream.next();
    } catch (e) {
    if (e instanceof MongoOperationTimeoutError && !changeStream.closed) {
    await changeStream.next();
    }
    throw e;
    } +
    + +

    In emitter mode, if the change stream goes timeoutMS without emitting a change event, it will emit an error event that returns a MongoOperationTimeoutError, but will not close the change stream unless the resume attempt fails. There is no need to re-establish change listeners as this will automatically continue emitting change events once the resume attempt completes.

    +
    const changeStream = collection.watch([], { timeoutMS: 100 });
    changeStream.on('change', console.log);
    changeStream.on('error', e => {
    if (e instanceof MongoOperationTimeoutError && !changeStream.closed) {
    // do nothing
    } else {
    changeStream.close();
    }
    }); +
    + +
    diff --git a/docs/Next/classes/ExplainableCursor.html new file mode 100644 index 00000000000..83b15f0fa3b --- /dev/null +++ b/docs/Next/classes/ExplainableCursor.html @@ -0,0 +1,510 @@

    ExplainableCursor | mongodb

    Class ExplainableCursor<TSchema>Abstract

    A base class for any cursors that have explain() methods.

    +

    Type Parameters

    • TSchema

    Hierarchy (view full)

    Properties

    [asyncDispose]: (() => Promise<void>)

    An alias for AbstractCursor.close|AbstractCursor.close().

    +
    captureRejections: boolean

    Value: boolean

    +

    Change the default captureRejections option on all new EventEmitter objects.

    +

    v13.4.0, v12.16.0

    +
    captureRejectionSymbol: typeof captureRejectionSymbol

    Value: Symbol.for('nodejs.rejection')

    +

    See how to write a custom rejection handler.

    +

    v13.4.0, v12.16.0

    +
    CLOSE: "close" = ...
    defaultMaxListeners: number

    By default, a maximum of 10 listeners can be registered for any single event. This limit can be changed for individual EventEmitter instances using the emitter.setMaxListeners(n) method. To change the default for all EventEmitter instances, the events.defaultMaxListeners property can be used. If this value is not a positive number, a RangeError is thrown.

    +

    Take caution when setting the events.defaultMaxListeners because the change affects all EventEmitter instances, including those created before the change is made. However, calling emitter.setMaxListeners(n) still has precedence over events.defaultMaxListeners.

    +

    This is not a hard limit. The EventEmitter instance will allow more listeners to be added but will output a trace warning to stderr indicating that a "possible EventEmitter memory leak" has been detected. For any single EventEmitter, the emitter.getMaxListeners() and emitter.setMaxListeners() methods can be used to temporarily avoid this warning:

    +
    import { EventEmitter } from 'node:events';
    const emitter = new EventEmitter();
    emitter.setMaxListeners(emitter.getMaxListeners() + 1);
    emitter.once('event', () => {
    // do stuff
    emitter.setMaxListeners(Math.max(emitter.getMaxListeners() - 1, 0));
    }); +
    + +

    The --trace-warnings command-line flag can be used to display the stack trace for such warnings.

    +

    The emitted warning can be inspected with process.on('warning') and will have the additional emitter, type, and count properties, referring to the event emitter instance, the event's name and the number of attached listeners, respectively. Its name property is set to 'MaxListenersExceededWarning'.

    +

    v0.11.2

    +
    errorMonitor: typeof errorMonitor

    This symbol shall be used to install a listener for only monitoring 'error' events. Listeners installed using this symbol are called before the regular 'error' listeners are called.

    +

    Installing a listener using this symbol does not change the behavior once an 'error' event is emitted. Therefore, the process will still crash if no regular 'error' listener is installed.

    +

    v13.6.0, v12.17.0

    +

    Accessors

    • get closed(): boolean
    • The cursor is closed and all remaining locally buffered documents have been iterated.

      +

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      +

      It is non-zero for as long as the database has an open cursor.

      +

      The initiating command may receive a zero id if the entire result is in the firstBatch.

      +

      Returns undefined | Long

    • get killed(): boolean
    • A killCursors command was attempted on this cursor. This is performed if the cursor id is non-zero.

      +

      Returns boolean

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    • Add a cursor flag to the cursor

      +

      Parameters

      • flag:
            | "tailable"
            | "oplogReplay"
            | "noCursorTimeout"
            | "awaitData"
            | "exhaust"
            | "partial"

        The flag to set; must be one of the following: 'tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', 'partial'.

        +
      • value: boolean

        The flag boolean value.

        +

      Returns this
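      A small hedged sketch on a concrete cursor subclass (a FindCursor from an assumed `collection`; the flag name comes from the list above):

      const cursor = collection.find({}).addCursorFlag('noCursorTimeout', true);
      const docs = await cursor.toArray();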

    • Frees any client-side resources used by the cursor.

      +

      Parameters

      • Optionaloptions: {
            timeoutMS?: number;
        }
        • OptionaltimeoutMS?: number

      Returns Promise<void>

    • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

      +

      Returns true if the event had listeners, false otherwise.

      +
      import { EventEmitter } from 'node:events';
      const myEmitter = new EventEmitter();

      // First listener
      myEmitter.on('event', function firstListener() {
      console.log('Helloooo! first listener');
      });
      // Second listener
      myEmitter.on('event', function secondListener(arg1, arg2) {
      console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
      });
      // Third listener
      myEmitter.on('event', function thirdListener(...args) {
      const parameters = args.join(', ');
      console.log(`event with parameters ${parameters} in third listener`);
      });

      console.log(myEmitter.listeners('event'));

      myEmitter.emit('event', 1, 2, 3, 4, 5);

      // Prints:
      // [
      // [Function: firstListener],
      // [Function: secondListener],
      // [Function: thirdListener]
      // ]
      // Helloooo! first listener
      // event with parameters 1, 2 in second listener
      // event with parameters 1, 2, 3, 4, 5 in third listener +
      + +

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns boolean

      v0.1.26

      +
    • Returns an array listing the events for which the emitter has registered listeners. The values in the array are strings or Symbols.

      +
      import { EventEmitter } from 'node:events';

      const myEE = new EventEmitter();
      myEE.on('foo', () => {});
      myEE.on('bar', () => {});

      const sym = Symbol('symbol');
      myEE.on(sym, () => {});

      console.log(myEE.eventNames());
      // Prints: [ 'foo', 'bar', Symbol(symbol) ] +
      + +

      Returns string[]

      v6.0.0

      +
    • Iterates over all the documents for this cursor using the iterator, callback pattern.

      +

      If the iterator returns false, iteration will stop.

      +

      Parameters

      • iterator: ((doc: TSchema) => boolean | void)

        The iteration callback.

        +
          • (doc): boolean | void
          • Parameters

            Returns boolean | void

      Returns Promise<void>

        +
      • Will be removed in a future release. Use for await...of instead.
      • +
      +
    • Returns the number of listeners listening for the event named eventName. If listener is provided, it will return how many times the listener is found in the list of the listeners of the event.

      +

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns number

      v3.2.0

      +
    • Map all documents using the provided function. If there is a transform set on the cursor, that will be called first and the result passed to this function's transform.

      +

      Type Parameters

      • T = any

      Parameters

      • transform: ((doc: TSchema) => T)

        The mapping transformation method.

        +

      Returns AbstractCursor<T, AbstractCursorEvents>

      Note Cursors use null internally to indicate that there are no more documents in the cursor. Providing a mapping function that maps values to null will result in the cursor closing itself before it has finished iterating all documents. This will not result in a memory leak, just surprising behavior. For example:

      +
      const cursor = collection.find({});
      cursor.map(() => null);

      const documents = await cursor.toArray();
      // documents is always [], regardless of how many documents are in the collection. +
      + +

      Other falsey values are allowed:

      +
      const cursor = collection.find({});
      cursor.map(() => '');

      const documents = await cursor.toArray();
      // documents is now an array of empty strings +
      + +

      Note for Typescript Users: adding a transform changes the return type of the iteration of this cursor, it does not return a new instance of a cursor. This means when calling map, you should always assign the result to a new variable in order to get a correctly typed cursor variable. Take note of the following example:

      +
      const cursor: FindCursor<Document> = coll.find();
      const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
      const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[] +
      + +
    • Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)

      +

      Parameters

      • value: number

        Number of milliseconds to wait before aborting the query.

        +

      Returns this

    • Adds the listener function to the end of the listeners array for the event named eventName. No checks are made to see if the listener has already been added. Multiple calls passing the same combination of eventName and listener will result in the listener being added, and called, multiple times.

      +
      server.on('connection', (stream) => {
      console.log('someone connected!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      By default, event listeners are invoked in the order they are added. The emitter.prependListener() method can be used as an alternative to add the event listener to the beginning of the listeners array.

      +
      import { EventEmitter } from 'node:events';
      const myEE = new EventEmitter();
      myEE.on('foo', () => console.log('a'));
      myEE.prependListener('foo', () => console.log('b'));
      myEE.emit('foo');
      // Prints:
      // b
      // a +
      + +

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns this

      v0.1.101

      +
    • Adds the listener function to the end of the listeners array for the event +named eventName. No checks are made to see if the listener has already +been added. Multiple calls passing the same combination of eventName and +listener will result in the listener being added, and called, multiple times.

      +
      server.on('connection', (stream) => {
      console.log('someone connected!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      By default, event listeners are invoked in the order they are added. The emitter.prependListener() method can be used as an alternative to add the +event listener to the beginning of the listeners array.

      +
      import { EventEmitter } from 'node:events';
      const myEE = new EventEmitter();
      myEE.on('foo', () => console.log('a'));
      myEE.prependListener('foo', () => console.log('b'));
      myEE.emit('foo');
      // Prints:
      // b
      // a +
      + +

      Parameters

      • event: CommonEvents
      • listener: ((eventName: string | symbol, listener: GenericListener) => void)

        The callback function

        +
          • (eventName, listener): void
          • Parameters

            Returns void

      Returns this

      v0.1.101

      +
    • Adds the listener function to the end of the listeners array for the event +named eventName. No checks are made to see if the listener has already +been added. Multiple calls passing the same combination of eventName and +listener will result in the listener being added, and called, multiple times.

      +
      server.on('connection', (stream) => {
      console.log('someone connected!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      By default, event listeners are invoked in the order they are added. The emitter.prependListener() method can be used as an alternative to add the +event listener to the beginning of the listeners array.

      +
      import { EventEmitter } from 'node:events';
      const myEE = new EventEmitter();
      myEE.on('foo', () => console.log('a'));
      myEE.prependListener('foo', () => console.log('b'));
      myEE.emit('foo');
      // Prints:
      // b
      // a +
      + +

      Parameters

      Returns this

      v0.1.101

      +
    • Adds a one-time listener function for the event named eventName. The next time eventName is triggered, this listener is removed and then invoked.

      +
      server.once('connection', (stream) => {
      console.log('Ah, we have our first user!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      By default, event listeners are invoked in the order they are added. The emitter.prependOnceListener() method can be used as an alternative to add the event listener to the beginning of the listeners array.

      +
      import { EventEmitter } from 'node:events';
      const myEE = new EventEmitter();
      myEE.once('foo', () => console.log('a'));
      myEE.prependOnceListener('foo', () => console.log('b'));
      myEE.emit('foo');
      // Prints:
      // b
      // a +
      + +

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns this

      v0.3.0

      +
    • Adds a one-time listener function for the event named eventName. The +next time eventName is triggered, this listener is removed and then invoked.

      +
      server.once('connection', (stream) => {
      console.log('Ah, we have our first user!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      By default, event listeners are invoked in the order they are added. The emitter.prependOnceListener() method can be used as an alternative to add the +event listener to the beginning of the listeners array.

      +
      import { EventEmitter } from 'node:events';
      const myEE = new EventEmitter();
      myEE.once('foo', () => console.log('a'));
      myEE.prependOnceListener('foo', () => console.log('b'));
      myEE.emit('foo');
      // Prints:
      // b
      // a +
      + +

      Parameters

      • event: CommonEvents
      • listener: ((eventName: string | symbol, listener: GenericListener) => void)

        The callback function

        +
          • (eventName, listener): void
          • Parameters

            Returns void

      Returns this

      v0.3.0

      +
    • Adds a one-time listener function for the event named eventName. The +next time eventName is triggered, this listener is removed and then invoked.

      +
      server.once('connection', (stream) => {
      console.log('Ah, we have our first user!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      By default, event listeners are invoked in the order they are added. The emitter.prependOnceListener() method can be used as an alternative to add the +event listener to the beginning of the listeners array.

      +
      import { EventEmitter } from 'node:events';
      const myEE = new EventEmitter();
      myEE.once('foo', () => console.log('a'));
      myEE.prependOnceListener('foo', () => console.log('b'));
      myEE.emit('foo');
      // Prints:
      // b
      // a +
      + +

      Parameters

      Returns this

      v0.3.0

      +
    • Adds the listener function to the beginning of the listeners array for the event named eventName. No checks are made to see if the listener has already been added. Multiple calls passing the same combination of eventName and listener will result in the listener being added, and called, multiple times.

      +
      server.prependListener('connection', (stream) => {
      console.log('someone connected!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns this

      v6.0.0

      +
    • Adds the listener function to the beginning of the listeners array for the +event named eventName. No checks are made to see if the listener has +already been added. Multiple calls passing the same combination of eventName +and listener will result in the listener being added, and called, multiple times.

      +
      server.prependListener('connection', (stream) => {
      console.log('someone connected!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Parameters

      • event: CommonEvents
      • listener: ((eventName: string | symbol, listener: GenericListener) => void)

        The callback function

        +
          • (eventName, listener): void
          • Parameters

            Returns void

      Returns this

      v6.0.0

      +
    • Adds the listener function to the beginning of the listeners array for the +event named eventName. No checks are made to see if the listener has +already been added. Multiple calls passing the same combination of eventName +and listener will result in the listener being added, and called, multiple times.

      +
      server.prependListener('connection', (stream) => {
      console.log('someone connected!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Parameters

      Returns this

      v6.0.0

      +
    • Adds a one-time listener function for the event named eventName to the beginning of the listeners array. The next time eventName is triggered, this listener is removed, and then invoked.

      +
      server.prependOnceListener('connection', (stream) => {
      console.log('Ah, we have our first user!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns this

      v6.0.0

      +
    • Adds a one-time listener function for the event named eventName to the beginning of the listeners array. The next time eventName is triggered, this listener is removed, and then invoked.

      +
      server.prependOnceListener('connection', (stream) => {
      console.log('Ah, we have our first user!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Parameters

      • event: CommonEvents
      • listener: ((eventName: string | symbol, listener: GenericListener) => void)

        The callback function

        +
          • (eventName, listener): void
          • Parameters

            Returns void

      Returns this

      v6.0.0

      +
    • Adds a one-time listener function for the event named eventName to the beginning of the listeners array. The next time eventName is triggered, this listener is removed, and then invoked.

      +
      server.prependOnceListener('connection', (stream) => {
      console.log('Ah, we have our first user!');
      }); +
      + +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Parameters

      Returns this

      v6.0.0

      +
    • Returns a copy of the array of listeners for the event named eventName, including any wrappers (such as those created by .once()).

      +
      import { EventEmitter } from 'node:events';
      const emitter = new EventEmitter();
      emitter.once('log', () => console.log('log once'));

      // Returns a new Array with a function `onceWrapper` which has a property
      // `listener` which contains the original listener bound above
      const listeners = emitter.rawListeners('log');
      const logFnWrapper = listeners[0];

      // Logs "log once" to the console and does not unbind the `once` event
      logFnWrapper.listener();

      // Logs "log once" to the console and removes the listener
      logFnWrapper();

      emitter.on('log', () => console.log('log persistently'));
      // Will return a new Array with a single function bound by `.on()` above
      const newListeners = emitter.rawListeners('log');

      // Logs "log persistently" twice
      newListeners[0]();
      emitter.emit('log'); +
      + +

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns AbstractCursorEvents[EventKey][]

      v9.4.0

      +
    • Removes all listeners, or those of the specified eventName.

      +

      It is bad practice to remove listeners added elsewhere in the code, particularly when the EventEmitter instance was created by some other component or module (e.g. sockets or file streams).

      +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Type Parameters

      • EventKey extends "close"

      Parameters

      • Optional event: string | symbol | EventKey

      Returns this

      v0.1.26
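      As an illustrative sketch only (the cursor variable and the use of the 'close' event here are assumptions, not taken from this page), removing listeners from a cursor might look like:

      // Hypothetical cursor obtained elsewhere, e.g. const cursor = coll.find({});
      cursor.on('close', () => console.log('cursor closed'));
      // Drop every listener registered for the 'close' event on this cursor.
      cursor.removeAllListeners('close');
      // Or, with no argument, drop the listeners for every event.
      cursor.removeAllListeners();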

      +
    • Removes the specified listener from the listener array for the event named eventName.

      +
      const callback = (stream) => {
      console.log('someone connected!');
      };
      server.on('connection', callback);
      // ...
      server.removeListener('connection', callback);

      removeListener() will remove, at most, one instance of a listener from the listener array. If any single listener has been added multiple times to the listener array for the specified eventName, then removeListener() must be called multiple times to remove each instance.

      Once an event is emitted, all listeners attached to it at the time of emitting are called in order. This implies that any removeListener() or removeAllListeners() calls after emitting and before the last listener finishes execution will not remove them from emit() in progress. Subsequent events behave as expected.

      +
      import { EventEmitter } from 'node:events';
      class MyEmitter extends EventEmitter {}
      const myEmitter = new MyEmitter();

      const callbackA = () => {
      console.log('A');
      myEmitter.removeListener('event', callbackB);
      };

      const callbackB = () => {
      console.log('B');
      };

      myEmitter.on('event', callbackA);

      myEmitter.on('event', callbackB);

      // callbackA removes listener callbackB but it will still be called.
      // Internal listener array at time of emit [callbackA, callbackB]
      myEmitter.emit('event');
      // Prints:
      // A
      // B

      // callbackB is now removed.
      // Internal listener array [callbackA]
      myEmitter.emit('event');
      // Prints:
      // A

      Because listeners are managed using an internal array, calling this will change the position indices of any listener registered after the listener being removed. This will not impact the order in which listeners are called, but it means that any copies of the listener array as returned by the emitter.listeners() method will need to be recreated.

      When a single function has been added as a handler multiple times for a single event (as in the example below), removeListener() will remove the most recently added instance. In the example the once('ping') listener is removed:

      +
      import { EventEmitter } from 'node:events';
      const ee = new EventEmitter();

      function pong() {
      console.log('pong');
      }

      ee.on('ping', pong);
      ee.once('ping', pong);
      ee.removeListener('ping', pong);

      ee.emit('ping');
      ee.emit('ping');

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns this

      v0.1.26

      +
    • Removes the specified listener from the listener array for the event named eventName.

      +
      const callback = (stream) => {
      console.log('someone connected!');
      };
      server.on('connection', callback);
      // ...
      server.removeListener('connection', callback);

      removeListener() will remove, at most, one instance of a listener from the listener array. If any single listener has been added multiple times to the listener array for the specified eventName, then removeListener() must be called multiple times to remove each instance.

      Once an event is emitted, all listeners attached to it at the time of emitting are called in order. This implies that any removeListener() or removeAllListeners() calls after emitting and before the last listener finishes execution will not remove them from emit() in progress. Subsequent events behave as expected.

      +
      import { EventEmitter } from 'node:events';
      class MyEmitter extends EventEmitter {}
      const myEmitter = new MyEmitter();

      const callbackA = () => {
      console.log('A');
      myEmitter.removeListener('event', callbackB);
      };

      const callbackB = () => {
      console.log('B');
      };

      myEmitter.on('event', callbackA);

      myEmitter.on('event', callbackB);

      // callbackA removes listener callbackB but it will still be called.
      // Internal listener array at time of emit [callbackA, callbackB]
      myEmitter.emit('event');
      // Prints:
      // A
      // B

      // callbackB is now removed.
      // Internal listener array [callbackA]
      myEmitter.emit('event');
      // Prints:
      // A

      Because listeners are managed using an internal array, calling this will change the position indices of any listener registered after the listener being removed. This will not impact the order in which listeners are called, but it means that any copies of the listener array as returned by the emitter.listeners() method will need to be recreated.

      When a single function has been added as a handler multiple times for a single event (as in the example below), removeListener() will remove the most recently added instance. In the example the once('ping') listener is removed:

      +
      import { EventEmitter } from 'node:events';
      const ee = new EventEmitter();

      function pong() {
      console.log('pong');
      }

      ee.on('ping', pong);
      ee.once('ping', pong);
      ee.removeListener('ping', pong);

      ee.emit('ping');
      ee.emit('ping');

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Parameters

      Returns this

      v0.1.26

      +
    • Removes the specified listener from the listener array for the event named eventName.

      +
      const callback = (stream) => {
      console.log('someone connected!');
      };
      server.on('connection', callback);
      // ...
      server.removeListener('connection', callback);

      removeListener() will remove, at most, one instance of a listener from the listener array. If any single listener has been added multiple times to the listener array for the specified eventName, then removeListener() must be called multiple times to remove each instance.

      Once an event is emitted, all listeners attached to it at the time of emitting are called in order. This implies that any removeListener() or removeAllListeners() calls after emitting and before the last listener finishes execution will not remove them from emit() in progress. Subsequent events behave as expected.

      +
      import { EventEmitter } from 'node:events';
      class MyEmitter extends EventEmitter {}
      const myEmitter = new MyEmitter();

      const callbackA = () => {
      console.log('A');
      myEmitter.removeListener('event', callbackB);
      };

      const callbackB = () => {
      console.log('B');
      };

      myEmitter.on('event', callbackA);

      myEmitter.on('event', callbackB);

      // callbackA removes listener callbackB but it will still be called.
      // Internal listener array at time of emit [callbackA, callbackB]
      myEmitter.emit('event');
      // Prints:
      // A
      // B

      // callbackB is now removed.
      // Internal listener array [callbackA]
      myEmitter.emit('event');
      // Prints:
      // A

      Because listeners are managed using an internal array, calling this will change the position indices of any listener registered after the listener being removed. This will not impact the order in which listeners are called, but it means that any copies of the listener array as returned by the emitter.listeners() method will need to be recreated.

      When a single function has been added as a handler multiple times for a single event (as in the example below), removeListener() will remove the most recently added instance. In the example the once('ping') listener is removed:

      +
      import { EventEmitter } from 'node:events';
      const ee = new EventEmitter();

      function pong() {
      console.log('pong');
      }

      ee.on('ping', pong);
      ee.once('ping', pong);
      ee.removeListener('ping', pong);

      ee.emit('ping');
      ee.emit('ping');

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Parameters

      Returns this

      v0.1.26

      +
    • Rewind this cursor to its uninitialized state. Any options that are present on the cursor will remain in effect. Iterating this cursor will cause new queries to be sent to the server, even if the resultant data has already been retrieved by this cursor.

      +

      Returns void
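
      For instance, a minimal sketch (assuming a connected MongoClient and a collection coll with a few documents; these names are illustrative):

      const cursor = coll.find({ qty: { $gt: 0 } }).limit(5);
      const firstPass = await cursor.toArray();   // exhausts the cursor
      cursor.rewind();                            // back to the uninitialized state
      const secondPass = await cursor.toArray();  // sends a new query to the server
      console.log(firstPass.length === secondPass.length); // true, barring concurrent writes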

    • By default EventEmitters will print a warning if more than 10 listeners are added for a particular event. This is a useful default that helps finding memory leaks. The emitter.setMaxListeners() method allows the limit to be modified for this specific EventEmitter instance. The value can be set to Infinity (or 0) to indicate an unlimited number of listeners.

      +

      Returns a reference to the EventEmitter, so that calls can be chained.

      +

      Parameters

      • n: number

      Returns this

      v0.3.5

      +
    • Returns an array of documents. The caller is responsible for making sure that there is enough memory to store the results. Note that the array only contains partial results when this cursor had been previously accessed. In that case, cursor.rewind() can be used to reset the cursor.

      +

      Returns Promise<TSchema[]>

    • Experimental

      Listens once to the abort event on the provided signal.

      +

      Listening to the abort event on abort signals is unsafe and may lead to resource leaks since another third party with the signal can call e.stopImmediatePropagation(). Unfortunately Node.js cannot change this since it would violate the web standard. Additionally, the original API makes it easy to forget to remove listeners.

      This API allows safely using AbortSignals in Node.js APIs by solving these two issues by listening to the event such that stopImmediatePropagation does not prevent the listener from running.

      +

      Returns a disposable so that it may be unsubscribed from more easily.

      +
      import { addAbortListener } from 'node:events';

      function example(signal) {
      let disposable;
      try {
      signal.addEventListener('abort', (e) => e.stopImmediatePropagation());
      disposable = addAbortListener(signal, (e) => {
      // Do something when signal is aborted.
      });
      } finally {
      disposable?.[Symbol.dispose]();
      }
      }

      Parameters

      • signal: AbortSignal
      • resource: ((event: Event) => void)
          • (event): void
          • Parameters

            • event: Event

            Returns void

      Returns Disposable

      Disposable that removes the abort listener.

      +

      v20.5.0

      +
    • Returns a copy of the array of listeners for the event named eventName.

      +

      For EventEmitters this behaves exactly the same as calling .listeners on the emitter.

      For EventTargets this is the only way to get the event listeners for the event target. This is useful for debugging and diagnostic purposes.

      +
      import { getEventListeners, EventEmitter } from 'node:events';

      {
      const ee = new EventEmitter();
      const listener = () => console.log('Events are fun');
      ee.on('foo', listener);
      console.log(getEventListeners(ee, 'foo')); // [ [Function: listener] ]
      }
      {
      const et = new EventTarget();
      const listener = () => console.log('Events are fun');
      et.addEventListener('foo', listener);
      console.log(getEventListeners(et, 'foo')); // [ [Function: listener] ]
      }

      Parameters

      • emitter: EventEmitter<DefaultEventMap> | EventTarget
      • name: string | symbol

      Returns Function[]

      v15.2.0, v14.17.0

      +
    • Returns the currently set max amount of listeners.

      +

      For EventEmitters this behaves exactly the same as calling .getMaxListeners on the emitter.

      For EventTargets this is the only way to get the max event listeners for the event target. If the number of event handlers on a single EventTarget exceeds the max set, the EventTarget will print a warning.

      +
      import { getMaxListeners, setMaxListeners, EventEmitter } from 'node:events';

      {
      const ee = new EventEmitter();
      console.log(getMaxListeners(ee)); // 10
      setMaxListeners(11, ee);
      console.log(getMaxListeners(ee)); // 11
      }
      {
      const et = new EventTarget();
      console.log(getMaxListeners(et)); // 10
      setMaxListeners(11, et);
      console.log(getMaxListeners(et)); // 11
      }

      Parameters

      • emitter: EventEmitter<DefaultEventMap> | EventTarget

      Returns number

      v19.9.0

      +
    • A class method that returns the number of listeners for the given eventName registered on the given emitter.

      +
      import { EventEmitter, listenerCount } from 'node:events';

      const myEmitter = new EventEmitter();
      myEmitter.on('event', () => {});
      myEmitter.on('event', () => {});
      console.log(listenerCount(myEmitter, 'event'));
      // Prints: 2

      Parameters

      • emitter: EventEmitter<DefaultEventMap>

        The emitter to query

        +
      • eventName: string | symbol

        The event name

        +

      Returns number

      v0.9.12

      +

      Since v3.2.0 - Use listenerCount instead.

      +
    • import { on, EventEmitter } from 'node:events';
      import process from 'node:process';

      const ee = new EventEmitter();

      // Emit later on
      process.nextTick(() => {
      ee.emit('foo', 'bar');
      ee.emit('foo', 42);
      });

      for await (const event of on(ee, 'foo')) {
      // The execution of this inner block is synchronous and it
      // processes one event at a time (even with await). Do not use
      // if concurrent execution is required.
      console.log(event); // prints ['bar'] [42]
      }
      // Unreachable here

      Returns an AsyncIterator that iterates eventName events. It will throw if the EventEmitter emits 'error'. It removes all listeners when exiting the loop. The value returned by each iteration is an array composed of the emitted event arguments.

      +

      An AbortSignal can be used to cancel waiting on events:

      +
      import { on, EventEmitter } from 'node:events';
      import process from 'node:process';

      const ac = new AbortController();

      (async () => {
      const ee = new EventEmitter();

      // Emit later on
      process.nextTick(() => {
      ee.emit('foo', 'bar');
      ee.emit('foo', 42);
      });

      for await (const event of on(ee, 'foo', { signal: ac.signal })) {
      // The execution of this inner block is synchronous and it
      // processes one event at a time (even with await). Do not use
      // if concurrent execution is required.
      console.log(event); // prints ['bar'] [42]
      }
      // Unreachable here
      })();

      process.nextTick(() => ac.abort());

      Use the close option to specify an array of event names that will end the iteration:

      +
      import { on, EventEmitter } from 'node:events';
      import process from 'node:process';

      const ee = new EventEmitter();

      // Emit later on
      process.nextTick(() => {
      ee.emit('foo', 'bar');
      ee.emit('foo', 42);
      ee.emit('close');
      });

      for await (const event of on(ee, 'foo', { close: ['close'] })) {
      console.log(event); // prints ['bar'] [42]
      }
      // the loop will exit after 'close' is emitted
      console.log('done'); // prints 'done'

      Parameters

      • emitter: EventEmitter<DefaultEventMap>
      • eventName: string | symbol
      • Optional options: StaticEventEmitterIteratorOptions

      Returns AsyncIterableIterator<any[]>

      An AsyncIterator that iterates eventName events emitted by the emitter

      +

      v13.6.0, v12.16.0

      +
    • Parameters

      • emitter: EventTarget
      • eventName: string
      • Optional options: StaticEventEmitterIteratorOptions

      Returns AsyncIterableIterator<any[]>

    • Creates a Promise that is fulfilled when the EventEmitter emits the given event or that is rejected if the EventEmitter emits 'error' while waiting. The Promise will resolve with an array of all the arguments emitted to the given event.

      This method is intentionally generic and works with the web platform EventTarget interface, which has no special 'error' event semantics and does not listen to the 'error' event.

      +
      import { once, EventEmitter } from 'node:events';
      import process from 'node:process';

      const ee = new EventEmitter();

      process.nextTick(() => {
      ee.emit('myevent', 42);
      });

      const [value] = await once(ee, 'myevent');
      console.log(value);

      const err = new Error('kaboom');
      process.nextTick(() => {
      ee.emit('error', err);
      });

      try {
      await once(ee, 'myevent');
      } catch (err) {
      console.error('error happened', err);
      }

      The special handling of the 'error' event is only used when events.once() is used to wait for another event. If events.once() is used to wait for the 'error' event itself, then it is treated as any other kind of event without special handling:

      +
      import { EventEmitter, once } from 'node:events';

      const ee = new EventEmitter();

      once(ee, 'error')
      .then(([err]) => console.log('ok', err.message))
      .catch((err) => console.error('error', err.message));

      ee.emit('error', new Error('boom'));

      // Prints: ok boom

      An AbortSignal can be used to cancel waiting for the event:

      +
      import { EventEmitter, once } from 'node:events';

      const ee = new EventEmitter();
      const ac = new AbortController();

      async function foo(emitter, event, signal) {
      try {
      await once(emitter, event, { signal });
      console.log('event emitted!');
      } catch (error) {
      if (error.name === 'AbortError') {
      console.error('Waiting for the event was canceled!');
      } else {
      console.error('There was an error', error.message);
      }
      }
      }

      foo(ee, 'foo', ac.signal);
      ac.abort(); // Abort waiting for the event
      ee.emit('foo'); // Prints: Waiting for the event was canceled!

      Parameters

      • emitter: EventEmitter<DefaultEventMap>
      • eventName: string | symbol
      • Optional options: StaticEventEmitterOptions

      Returns Promise<any[]>

      v11.13.0, v10.16.0

      +
    • Parameters

      • emitter: EventTarget
      • eventName: string
      • Optional options: StaticEventEmitterOptions

      Returns Promise<any[]>

    • import { setMaxListeners, EventEmitter } from 'node:events';

      const target = new EventTarget();
      const emitter = new EventEmitter();

      setMaxListeners(5, target, emitter);

      Parameters

      • Optional n: number

        A non-negative number. The maximum number of listeners per EventTarget event.

      • Rest ...eventTargets: (EventEmitter<DefaultEventMap> | EventTarget)[]

      Returns void

      v15.4.0

      +
    diff --git a/docs/Next/classes/FindCursor.html b/docs/Next/classes/FindCursor.html index 2660e23d52e..5cf03f91bf1 100644 --- a/docs/Next/classes/FindCursor.html +++ b/docs/Next/classes/FindCursor.html @@ -1,4 +1,4 @@ -FindCursor | mongodb

    Class FindCursor<TSchema>

    Type Parameters

    • TSchema = any

    Hierarchy (view full)

    Properties

    [asyncDispose] +FindCursor | mongodb

    Class FindCursor<TSchema>

    Type Parameters

    • TSchema = any

    Hierarchy (view full)

    Properties

    [asyncDispose]: (() => Promise<void>)

    An alias for AbstractCursor.close|AbstractCursor.close().

    -
    captureRejections: boolean

    Value: boolean

    +
    captureRejections: boolean

    Value: boolean

    Change the default captureRejections option on all new EventEmitter objects.

    v13.4.0, v12.16.0

    -
    captureRejectionSymbol: typeof captureRejectionSymbol

    Value: Symbol.for('nodejs.rejection')

    +
    captureRejectionSymbol: typeof captureRejectionSymbol

    Value: Symbol.for('nodejs.rejection')

    See how to write a custom rejection handler.

    v13.4.0, v12.16.0

    -
    CLOSE: "close" = ...
    defaultMaxListeners: number

    By default, a maximum of 10 listeners can be registered for any single +

    CLOSE: "close" = ...
    defaultMaxListeners: number

    By default, a maximum of 10 listeners can be registered for any single event. This limit can be changed for individual EventEmitter instances using the emitter.setMaxListeners(n) method. To change the default for allEventEmitter instances, the events.defaultMaxListeners property @@ -102,79 +103,82 @@ listeners, respectively. Its name property is set to 'MaxListenersExceededWarning'.

    v0.11.2

    -
    errorMonitor: typeof errorMonitor

    This symbol shall be used to install a listener for only monitoring 'error' events. Listeners installed using this symbol are called before the regular 'error' listeners are called.

    +
    errorMonitor: typeof errorMonitor

    This symbol shall be used to install a listener for only monitoring 'error' events. Listeners installed using this symbol are called before the regular 'error' listeners are called.

    Installing a listener using this symbol does not change the behavior once an 'error' event is emitted. Therefore, the process will still crash if no regular 'error' listener is installed.

    v13.6.0, v12.17.0

    -

    Accessors

    • get closed(): boolean
    • The cursor is closed and all remaining locally buffered documents have been iterated.

      -

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      +

    Accessors

    • get closed(): boolean
    • The cursor is closed and all remaining locally buffered documents have been iterated.

      +

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      It is non-zero for as long as the database has an open cursor.

      The initiating command may receive a zero id if the entire result is in the firstBatch.

      -

      Returns undefined | Long

    • get killed(): boolean
    • A killCursors command was attempted on this cursor. This is performed if the cursor id is non zero.

      -

      Returns boolean

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    • Add a cursor flag to the cursor

      Parameters

      • flag:
            | "tailable"
            | "oplogReplay"
            | "noCursorTimeout"
            | "awaitData"
            | "exhaust"
            | "partial"

        The flag to set; must be one of the following: 'tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', 'partial'.

      • value: boolean

        The flag boolean value.

        -

      Returns this
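
      A hedged sketch of setting cursor flags (assuming coll is a capped collection on a connected client; the collection and its contents are assumptions):

      const tailing = coll.find({})
        .addCursorFlag('tailable', true)
        .addCursorFlag('awaitData', true);
      for await (const doc of tailing) {
        console.log(doc); // keeps waiting for newly inserted documents
      }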

    • Alias for emitter.on(eventName, listener).

      +

    Returns this

    • Add a query modifier to the cursor query

      Parameters

      • name: string

        The query modifier (must start with $, such as $orderby etc)

      • value:
            | string
            | number
            | boolean
            | Document

        The modifier value.

        -

      Returns this

    • Allows disk use for blocking sort operations exceeding 100MB memory. (MongoDB 3.2 or higher)

      +

    Returns this

    • Set the collation options for the cursor.

      +

    Returns this

    • Set the collation options for the cursor.

      Parameters

      • value: CollationOptions

        The cursor collation options (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).

        -

      Returns this
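
      For example, a small sketch (assuming coll; the locale and strength values are illustrative):

      // Case- and diacritic-insensitive match using a French collation.
      const docs = await coll.find({ name: 'cafe' })
        .collation({ locale: 'fr', strength: 1 })
        .toArray();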

    • Add a comment to the cursor query allowing for tracking the comment in the log.

      +

    Returns this

    • Add a comment to the cursor query allowing for tracking the comment in the log.

      Parameters

      • value: string

        The comment attached to this query.

        -

      Returns this

    • Get the count of documents for this cursor

      +

    Returns this

    • Get the count of documents for this cursor

      Parameters

      Returns Promise<number>

      Use collection.estimatedDocumentCount or collection.countDocuments instead
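
      A sketch of the suggested replacements (assuming coll; the filter is illustrative):

      // Exact count for a filter; runs an aggregation on the server.
      const matching = await coll.countDocuments({ status: 'active' });
      // Fast approximate count of the whole collection, based on collection metadata.
      const total = await coll.estimatedDocumentCount();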

      -
    • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments +

    • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

      Returns true if the event had listeners, false otherwise.

      import { EventEmitter } from 'node:events';
      const myEmitter = new EventEmitter();

      // First listener
      myEmitter.on('event', function firstListener() {
      console.log('Helloooo! first listener');
      });
      // Second listener
      myEmitter.on('event', function secondListener(arg1, arg2) {
      console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
      });
      // Third listener
      myEmitter.on('event', function thirdListener(...args) {
      const parameters = args.join(', ');
      console.log(`event with parameters ${parameters} in third listener`);
      });

      console.log(myEmitter.listeners('event'));

      myEmitter.emit('event', 1, 2, 3, 4, 5);

      // Prints:
      // [
      // [Function: firstListener],
      // [Function: secondListener],
      // [Function: thirdListener]
      // ]
      // Helloooo! first listener
      // event with parameters 1, 2 in second listener
      // event with parameters 1, 2, 3, 4, 5 in third listener

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns boolean

      v0.1.26

      -
    • Returns an array listing the events for which the emitter has registered listeners. The values in the array are strings or Symbols.

      import { EventEmitter } from 'node:events';

      const myEE = new EventEmitter();
      myEE.on('foo', () => {});
      myEE.on('bar', () => {});

      const sym = Symbol('symbol');
      myEE.on(sym, () => {});

      console.log(myEE.eventNames());
      // Prints: [ 'foo', 'bar', Symbol(symbol) ]

      Returns string[]

      v6.0.0

      -
    • Iterates over all the documents for this cursor using the iterator, callback pattern.

      If the iterator returns false, iteration will stop.

      Parameters

      • iterator: ((doc: TSchema) => boolean | void)

        The iteration callback.

          • (doc): boolean | void
          • Parameters

            Returns boolean | void

      Returns Promise<void>

      • Will be removed in a future release. Use for await...of instead.
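
      A sketch of the recommended async-iteration replacement (assuming coll; the filter is illustrative):

      for await (const doc of coll.find({ status: 'active' })) {
        console.log(doc._id);
        // Breaking out of the loop stops iteration, much like returning false from the iterator callback.
      }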
      -
    • Set the cursor hint

      Parameters

      • hint: Hint

        If specified, then the query system will only consider plans using the hinted index.

        -

      Returns this
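
      For example (assuming coll has an index on qty; the index itself is an assumption):

      const docs = await coll.find({ qty: { $gt: 10 } })
        .hint({ qty: 1 }) // only consider plans using the { qty: 1 } index
        .toArray();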

    • Set the limit for the cursor.

      +

    Returns this

    • Set the limit for the cursor.

      Parameters

      • value: number

        The limit for the cursor query.

        -

      Returns this

    • Returns the number of listeners listening for the event named eventName. +

    Returns this

    • Returns the number of listeners listening for the event named eventName. If listener is provided, it will return how many times the listener is found in the list of the listeners of the event.

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns number

      v3.2.0

      -
    • Map all documents using the provided function If there is a transform set on the cursor, that will be called first and the result passed to this function's transform.

      Type Parameters

      • T

      Parameters

      • transform: ((doc: TSchema) => T)

        The mapping transformation method.

        @@ -195,22 +199,22 @@
        const cursor: FindCursor<Document> = coll.find();
        const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
        const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
        -
    • Set the cursor max

      Parameters

      • max: Document

        Specify a $max value to specify the exclusive upper bound for a specific index in order to constrain the results of find(). The $max specifies the upper bound for all keys of a specific index in order.

        -

      Returns this

    • Set a maxAwaitTimeMS on a tailing cursor query to allow to customize the timeout value for the option awaitData (Only supported on MongoDB 3.2 or higher, ignored otherwise)

      +

    Returns this

    • Set a maxAwaitTimeMS on a tailing cursor query to allow to customize the timeout value for the option awaitData (Only supported on MongoDB 3.2 or higher, ignored otherwise)

      Parameters

      • value: number

        Number of milliseconds to wait before aborting the tailed query.

        -

      Returns this

    • Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)

      +

    Returns this

    • Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)

      Parameters

      • value: number

        Number of milliseconds to wait before aborting the query.

        -

      Returns this
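
      For example, capping server execution time at 500 ms (assuming coll; the filter is illustrative):

      // The server aborts the query, and the driver rejects, if it runs longer than 500 ms.
      const docs = await coll.find({ status: 'active' }).maxTimeMS(500).toArray();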

    • Set the cursor min

      +

    Returns this

    • Set the cursor min

      Parameters

      • min: Document

        Specify a $min value to specify the inclusive lower bound for a specific index in order to constrain the results of find(). The $min specifies the lower bound for all keys of a specific index in order.

        -

      Returns this
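
      A hedged sketch of bounding a scan with min and max (assuming coll has a { qty: 1 } index; recent servers also expect a matching hint):

      const docs = await coll.find()
        .hint({ qty: 1 })
        .min({ qty: 5 })    // inclusive lower bound on the { qty: 1 } index
        .max({ qty: 100 })  // exclusive upper bound on the same index
        .toArray();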

    • Alias for emitter.removeListener().

      +

    Returns this

    • Adds the listener function to the end of the listeners array for the event named eventName. No checks are made to see if the listener has already been added. Multiple calls passing the same combination of eventName and listener will result in the listener being added, and called, multiple times.

      @@ -225,7 +229,7 @@

      Returns this

      v0.1.101

      -
    • Adds the listener function to the end of the listeners array for the event +

    • Adds the listener function to the end of the listeners array for the event named eventName. No checks are made to see if the listener has already been added. Multiple calls passing the same combination of eventName and listener will result in the listener being added, and called, multiple times.

      @@ -240,7 +244,7 @@

      Returns this

      v0.1.101

      -
    • Adds the listener function to the end of the listeners array for the event +

    • Adds the listener function to the end of the listeners array for the event named eventName. No checks are made to see if the listener has already been added. Multiple calls passing the same combination of eventName and listener will result in the listener being added, and called, multiple times.

      @@ -255,7 +259,7 @@

      Returns this

      v0.1.101

      -
    • Adds a one-time listener function for the event named eventName. The next time eventName is triggered, this listener is removed and then invoked.

      server.once('connection', (stream) => {
      console.log('Ah, we have our first user!');
      });
      @@ -268,7 +272,7 @@

      Returns this

      v0.3.0

      -
    • Adds a one-time listener function for the event named eventName. The +

    • Adds a one-time listener function for the event named eventName. The next time eventName is triggered, this listener is removed and then invoked.

      server.once('connection', (stream) => {
      console.log('Ah, we have our first user!');
      });
      @@ -281,7 +285,7 @@

      Returns this

      v0.3.0

      -
    • Adds a one-time listener function for the event named eventName. The +

    • Adds a one-time listener function for the event named eventName. The next time eventName is triggered, this listener is removed and then invoked.

      server.once('connection', (stream) => {
      console.log('Ah, we have our first user!');
      });
      @@ -294,7 +298,7 @@

      Returns this

      v0.3.0

      -
    • Add a project stage to the aggregation pipeline

      Type Parameters

      Parameters

      Returns FindCursor<T>

      In order to strictly type this function you must provide an interface that represents the effect of your projection on the result documents.

      By default chaining a projection to your cursor changes the returned type to the generic @@ -360,20 +364,20 @@

      const cursor: FindCursor<{ a: number; b: string }> = coll.find();
      const projectCursor = cursor.project<{ a: number }>({ _id: 0, a: true });
      const aPropOnlyArray: {a: number}[] = await projectCursor.toArray();

      // or always use chaining and save the final cursor

      const cursor = coll.find().project<{ a: string }>({
      _id: 0,
      a: { $convert: { input: '$a', to: 'string' }
      }});
      -
    • Returns a copy of the array of listeners for the event named eventName, including any wrappers (such as those created by .once()).

      import { EventEmitter } from 'node:events';
      const emitter = new EventEmitter();
      emitter.once('log', () => console.log('log once'));

      // Returns a new Array with a function `onceWrapper` which has a property
      // `listener` which contains the original listener bound above
      const listeners = emitter.rawListeners('log');
      const logFnWrapper = listeners[0];

      // Logs "log once" to the console and does not unbind the `once` event
      logFnWrapper.listener();

      // Logs "log once" to the console and removes the listener
      logFnWrapper();

      emitter.on('log', () => console.log('log persistently'));
      // Will return a new Array with a single function bound by `.on()` above
      const newListeners = emitter.rawListeners('log');

      // Logs "log persistently" twice
      newListeners[0]();
      emitter.emit('log');

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns AbstractCursorEvents[EventKey][]

      v9.4.0

      -
    • Removes all listeners, or those of the specified eventName.

      It is bad practice to remove listeners added elsewhere in the code, particularly when the EventEmitter instance was created by some other component or module (e.g. sockets or file streams).

      Returns a reference to the EventEmitter, so that calls can be chained.

      Type Parameters

      • EventKey extends "close"

      Parameters

      • Optional event: string | symbol | EventKey

      Returns this

      v0.1.26

      -
    • Set the cursor returnKey. If set to true, modifies the cursor to only return the index field or fields for the results of the query, rather than documents. If set to true and the query does not use an index to perform the read operation, the returned documents will not contain any fields.

      Parameters

      • value: boolean

        the returnKey value.

        -

      Returns this

    • Rewind this cursor to its uninitialized state. Any options that are present on the cursor will +

    Returns this

    • Rewind this cursor to its uninitialized state. Any options that are present on the cursor will remain in effect. Iterating this cursor will cause new queries to be sent to the server, even if the resultant data has already been retrieved by this cursor.

      -

      Returns void

    • By default EventEmitters will print a warning if more than 10 listeners are added for a particular event. This is a useful default that helps finding memory leaks. The emitter.setMaxListeners() method allows the limit to be modified for this specific EventEmitter instance. The value can be set to Infinity (or 0) to indicate an unlimited number of listeners.

      Returns a reference to the EventEmitter, so that calls can be chained.

      Parameters

      • n: number

      Returns this

      v0.3.5

      -
    • Modifies the output of a query by adding a field $recordId to matching documents. $recordId is the internal key which uniquely identifies a document in a collection.

      +
    • Modifies the output of a query by adding a field $recordId to matching documents. $recordId is the internal key which uniquely identifies a document in a collection.

      Parameters

      • value: boolean

        The $showDiskLoc option has now been deprecated and replaced with the showRecordId field. $showDiskLoc will still be accepted for OP_QUERY style find.

        -

      Returns this

    • Set the skip for the cursor.

      +

    Returns this

    • Set the skip for the cursor.

      Parameters

      • value: number

        The skip for the cursor query.

        -

      Returns this

    • Sets the sort order of the cursor query.

      +

    Returns this

    • Sets the sort order of the cursor query.

      Parameters

      • sort: Sort

        The key or keys set for the sort.

      • Optional direction: SortDirection

        The direction of the sorting (1 or -1).

        -

      Returns this
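
      Taken together with skip and limit, sort is commonly chained for simple pagination; a sketch (assuming coll, with illustrative page values):

      const page = 2, pageSize = 20;
      const results = await coll.find({ status: 'active' })
        .sort({ createdAt: -1 })        // newest first
        .skip((page - 1) * pageSize)    // skip earlier pages
        .limit(pageSize)                // one page of results
        .toArray();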

    • Returns an array of documents. The caller is responsible for making sure that there +

    Returns this

    • Returns an array of documents. The caller is responsible for making sure that there is enough memory to store the results. Note that the array only contains partial results when this cursor had been previously accessed. In that case, cursor.rewind() can be used to reset the cursor.

      -

      Returns Promise<TSchema[]>

    • Experimental

      Listens once to the abort event on the provided signal.

      +

    Returns this

    • Returns a copy of the array of listeners for the event named eventName.

      +
    • Returns the currently set max amount of listeners.

      +
    • A class method that returns the number of listeners for the given eventName registered on the given emitter.

      +
    • A class method that returns the number of listeners for the given eventName registered on the given emitter.

      import { EventEmitter, listenerCount } from 'node:events';

      const myEmitter = new EventEmitter();
      myEmitter.on('event', () => {});
      myEmitter.on('event', () => {});
      console.log(listenerCount(myEmitter, 'event'));
      // Prints: 2
      @@ -524,7 +528,7 @@
    • eventName: string | symbol

      The event name

    Returns number

    v0.9.12

    Since v3.2.0 - Use listenerCount instead.

    -
    • import { on, EventEmitter } from 'node:events';
      import process from 'node:process';

      const ee = new EventEmitter();

      // Emit later on
      process.nextTick(() => {
      ee.emit('foo', 'bar');
      ee.emit('foo', 42);
      });

      for await (const event of on(ee, 'foo')) {
      // The execution of this inner block is synchronous and it
      // processes one event at a time (even with await). Do not use
      // if concurrent execution is required.
      console.log(event); // prints ['bar'] [42]
      }
      // Unreachable here +
    • import { on, EventEmitter } from 'node:events';
      import process from 'node:process';

      const ee = new EventEmitter();

      // Emit later on
      process.nextTick(() => {
      ee.emit('foo', 'bar');
      ee.emit('foo', 42);
      });

      for await (const event of on(ee, 'foo')) {
      // The execution of this inner block is synchronous and it
      // processes one event at a time (even with await). Do not use
      // if concurrent execution is required.
      console.log(event); // prints ['bar'] [42]
      }
      // Unreachable here

      Returns an AsyncIterator that iterates eventName events. It will throw @@ -541,7 +545,7 @@

      Parameters

      • emitter: EventEmitter<DefaultEventMap>
      • eventName: string | symbol
      • Optionaloptions: StaticEventEmitterIteratorOptions

      Returns AsyncIterableIterator<any[]>

      An AsyncIterator that iterates eventName events emitted by the emitter

      v13.6.0, v12.16.0

      -
    • Parameters

      • emitter: EventTarget
      • eventName: string
      • Optionaloptions: StaticEventEmitterIteratorOptions

      Returns AsyncIterableIterator<any[]>

    • Creates a Promise that is fulfilled when the EventEmitter emits the given +

    • Parameters

      • emitter: EventTarget
      • eventName: string
      • Optionaloptions: StaticEventEmitterIteratorOptions

      Returns AsyncIterableIterator<any[]>

    • Creates a Promise that is fulfilled when the EventEmitter emits the given event or that is rejected if the EventEmitter emits 'error' while waiting. The Promise will resolve with an array of all the arguments emitted to the given event.

      @@ -561,9 +565,9 @@

      Parameters

      • emitter: EventEmitter<DefaultEventMap>
      • eventName: string | symbol
      • Optionaloptions: StaticEventEmitterOptions

      Returns Promise<any[]>

      v11.13.0, v10.16.0

      -
    • Parameters

      • emitter: EventTarget
      • eventName: string
      • Optionaloptions: StaticEventEmitterOptions

      Returns Promise<any[]>

    • import { setMaxListeners, EventEmitter } from 'node:events';

      const target = new EventTarget();
      const emitter = new EventEmitter();

      setMaxListeners(5, target, emitter); +
    • Parameters

      • emitter: EventTarget
      • eventName: string
      • Optionaloptions: StaticEventEmitterOptions

      Returns Promise<any[]>

    • import { setMaxListeners, EventEmitter } from 'node:events';

      const target = new EventTarget();
      const emitter = new EventEmitter();

      setMaxListeners(5, target, emitter);

      Parameters

      • Optionaln: number

        A non-negative number. The maximum number of listeners per EventTarget event.

      • Rest...eventTargets: (EventEmitter<DefaultEventMap> | EventTarget)[]

      Returns void

      v15.4.0

      -
    +
    diff --git a/docs/Next/classes/FindOperators.html b/docs/Next/classes/FindOperators.html index 06e8fb13954..29f712286fd 100644 --- a/docs/Next/classes/FindOperators.html +++ b/docs/Next/classes/FindOperators.html @@ -1,6 +1,6 @@ FindOperators | mongodb

    Class FindOperators

    A builder object that is returned from BulkOperationBase#find. Is used to build a write operation that involves a query filter.

    -

    Properties

    Properties

    Methods

    Properties

    bulkOperation: BulkOperationBase

    Methods

    • Specifies arrayFilters for UpdateOne or UpdateMany bulk operations.

      -

      Parameters

      Returns this

    • Upsert modifier for update bulk operation, noting that this operation is an upsert.

      -

      Returns this

    +

    Properties

    bulkOperation: BulkOperationBase

    Methods

    • Specifies arrayFilters for UpdateOne or UpdateMany bulk operations.

      +

      Parameters

      Returns this

    • Upsert modifier for update bulk operation, noting that this operation is an upsert.

      +

      Returns this

    diff --git a/docs/Next/classes/GridFSBucket.html b/docs/Next/classes/GridFSBucket.html index 4a8c1bf561c..917da3f7a8c 100644 --- a/docs/Next/classes/GridFSBucket.html +++ b/docs/Next/classes/GridFSBucket.html @@ -1,5 +1,5 @@ GridFSBucket | mongodb

    Class GridFSBucket

    Constructor for a streaming GridFS interface

    -

    Hierarchy (view full)

    Constructors

    Hierarchy (view full)

    Constructors

    Properties

    captureRejections: boolean

    Value: boolean

    +

    Constructors

    Properties

    captureRejections: boolean

    Value: boolean

    Change the default captureRejections option on all new EventEmitter objects.

    v13.4.0, v12.16.0

    captureRejectionSymbol: typeof captureRejectionSymbol

    Value: Symbol.for('nodejs.rejection')

    @@ -76,16 +76,16 @@ files collections. This event is fired either when 1) it determines that no index creation is necessary, 2) when it successfully creates the necessary indexes.

    -

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    • Deletes a file with the given id

      Parameters

      Returns Promise<void>
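
      A sketch (assuming db from a connected MongoClient and fileId, the ObjectId of a previously uploaded file; both names are assumptions):

      const bucket = new GridFSBucket(db, { bucketName: 'uploads' });
      // Removes the file document and all of its chunks.
      await bucket.delete(fileId);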

    • Removes this bucket's files collection, followed by its chunks collection.

      -

      Returns Promise<void>

    • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments +

    • Optionaloptions: {
          timeoutMS: number;
      }
      • timeoutMS: number

    Returns Promise<void>

    • Removes this bucket's files collection, followed by its chunks collection.

      +

      Parameters

      • Optionaloptions: {
            timeoutMS: number;
        }
        • timeoutMS: number

      Returns Promise<void>

    • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

      Returns true if the event had listeners, false otherwise.

      import { EventEmitter } from 'node:events';
      const myEmitter = new EventEmitter();

      // First listener
      myEmitter.on('event', function firstListener() {
      console.log('Helloooo! first listener');
      });
      // Second listener
      myEmitter.on('event', function secondListener(arg1, arg2) {
      console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
      });
      // Third listener
      myEmitter.on('event', function thirdListener(...args) {
      const parameters = args.join(', ');
      console.log(`event with parameters ${parameters} in third listener`);
      });

      console.log(myEmitter.listeners('event'));

      myEmitter.emit('event', 1, 2, 3, 4, 5);

      // Prints:
      // [
      // [Function: firstListener],
      // [Function: secondListener],
      // [Function: thirdListener]
      // ]
      // Helloooo! first listener
      // event with parameters 1, 2 in second listener
      // event with parameters 1, 2, 3, 4, 5 in third listener @@ -99,7 +99,7 @@

      Returns string[]

      v6.0.0

    • Returns a writable stream (GridFSBucketWriteStream) for writing +

    Returns GridFSBucketWriteStream
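
      A hedged upload sketch (assuming db from a connected MongoClient and a local file ./report.pdf; both are assumptions):

      import fs from 'node:fs';

      const bucket = new GridFSBucket(db, { bucketName: 'uploads' });
      fs.createReadStream('./report.pdf')
        .pipe(bucket.openUploadStream('report.pdf', { metadata: { owner: 'demo' } }))
        .on('finish', function () {
          // gridFSFile is populated once the upload has finished.
          console.log('upload complete', this.gridFSFile?._id);
        });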

    • Adds the listener function to the beginning of the listeners array for the event named eventName. No checks are made to see if the listener has already been added. Multiple calls passing the same combination of eventName and listener will result in the listener being added, and called, multiple times.

      @@ -362,10 +362,10 @@

      Returns a reference to the EventEmitter, so that calls can be chained.

      Parameters

      Returns this

      v0.1.26

      -
    • Renames the file with the given _id to the given string

      Parameters

      • id: ObjectId

        the id of the file to rename

      • filename: string

        new name for the file

        -

      Returns Promise<void>
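
      For example (assuming bucket is a GridFSBucket and fileId identifies an existing file; both are assumptions):

      // Only the filename on the files collection document changes; chunks are untouched.
      await bucket.rename(fileId, 'report-v2.pdf');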

    • By default EventEmitters will print a warning if more than 10 listeners are +

    • Optionaloptions: {
          timeoutMS: number;
      }
      • timeoutMS: number

    Returns Promise<void>

    • By default EventEmitters will print a warning if more than 10 listeners are added for a particular event. This is a useful default that helps finding memory leaks. The emitter.setMaxListeners() method allows the limit to be modified for this specific EventEmitter instance. The value can be set to Infinity (or 0) to indicate an unlimited number of listeners.

      diff --git a/docs/Next/classes/GridFSBucketReadStream.html b/docs/Next/classes/GridFSBucketReadStream.html index 8a2ae7004fb..45db0d2b95d 100644 --- a/docs/Next/classes/GridFSBucketReadStream.html +++ b/docs/Next/classes/GridFSBucketReadStream.html @@ -1,6 +1,6 @@ GridFSBucketReadStream | mongodb

      Class GridFSBucketReadStream

      A readable stream that enables you to read buffers from GridFS.

      Do not instantiate this class directly. Use openDownloadStream() instead.

      -

      Hierarchy

      • Readable
        • GridFSBucketReadStream

      Properties

      Hierarchy

      • Readable
        • GridFSBucketReadStream

      Properties

      closed destroyed errored readable @@ -140,12 +140,12 @@ regular 'error' listener is installed.

      v13.6.0, v12.17.0

      FILE: "file" = ...

      Fires when the stream loaded the file document corresponding to the provided id.

      -

      Methods

      • Parameters

        • callback: ((error?: null | Error) => void)
            • (error?): void
            • Parameters

              • Optionalerror: null | Error

              Returns void

        Returns void

      • Parameters

        • error: null | Error
        • callback: ((error?: null | Error) => void)
            • (error?): void
            • Parameters

              • Optionalerror: null | Error

              Returns void

        Returns void

      • Calls readable.destroy() with an AbortError and returns a promise that fulfills when the stream is finished.

        +

      Methods

      • Parameters

        • callback: ((error?: null | Error) => void)
            • (error?): void
            • Parameters

              • Optionalerror: null | Error

              Returns void

        Returns void

      • Parameters

        • error: null | Error
        • callback: ((error?: null | Error) => void)
            • (error?): void
            • Parameters

              • Optionalerror: null | Error

              Returns void

        Returns void

      • Calls readable.destroy() with an AbortError and returns a promise that fulfills when the stream is finished.

        Returns Promise<void>

        v20.4.0

      • Returns AsyncIterableIterator<any>

      • Type Parameters

        • K

        Parameters

        • error: Error
        • event: string | symbol
        • Rest...args: AnyRest

        Returns void

      • Marks this stream as aborted (will never push another data event) and kills the underlying cursor. Will emit the 'end' event, and then the 'close' event once the cursor is successfully killed.

        -

        Returns Promise<void>

      • Event emitter The defined events on documents including:

        1. close
        2. @@ -245,7 +245,7 @@ an error if this stream has entered flowing mode (e.g. if you've already called on('data'))

        Parameters

        • end: number = 0

          Offset in bytes to stop reading at

          -

        Returns this

      • Returns an array listing the events for which the emitter has registered +

      Returns this

    • Returns an array listing the events for which the emitter has registered listeners. The values in the array are strings or Symbols.

      import { EventEmitter } from 'node:events';

      const myEE = new EventEmitter();
      myEE.on('foo', () => {});
      myEE.on('bar', () => {});

      const sym = Symbol('symbol');
      myEE.on(sym, () => {});

      console.log(myEE.eventNames());
      // Prints: [ 'foo', 'bar', Symbol(symbol) ]
      @@ -1017,7 +1017,7 @@ an error if this stream has entered flowing mode (e.g. if you've already called on('data'))

      Parameters

      • start: number = 0

        0-based offset in bytes to start streaming from

        -

      Returns this
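
      A sketch combining start and end to stream a byte range (assuming bucket, fileId, and the node:fs module; the names are assumptions):

      import fs from 'node:fs';

      bucket.openDownloadStream(fileId)
        .start(0)      // 0-based offset to start streaming from
        .end(1024)     // offset in bytes to stop reading at
        .pipe(fs.createWriteStream('./first-kilobyte.bin'));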

    • This method returns a new stream with the first limit chunks.

      +

    Returns this

    • This method returns a new stream with the first limit chunks.

      Parameters

      • limit: number

        the number of chunks to take from the readable.

      • Optionaloptions: Pick<ArrayOptions, "signal">

      Returns Readable

      a stream with limit chunks taken.

      v17.5.0

      diff --git a/docs/Next/classes/GridFSBucketWriteStream.html b/docs/Next/classes/GridFSBucketWriteStream.html index 2d22bc8847b..e3b1d87e79d 100644 --- a/docs/Next/classes/GridFSBucketWriteStream.html +++ b/docs/Next/classes/GridFSBucketWriteStream.html @@ -1,6 +1,6 @@ GridFSBucketWriteStream | mongodb

      Class GridFSBucketWriteStream

      A writable stream that enables you to write buffers to GridFS.

      Do not instantiate this class directly. Use openUploadStream() instead.

      -

      Hierarchy

      • Writable
        • GridFSBucketWriteStream

      Properties

      Hierarchy

      • Writable
        • GridFSBucketWriteStream

      Properties

      bucket: GridFSBucket
      bufToStore: Buffer

      Space used to store a chunk currently being inserted

      -

      A Collection instance where the file's chunks are stored

      -
      chunkSizeBytes: number

      The number of bytes that each chunk will be limited to

      -
      closed: boolean

      Is true after 'close' has been emitted.

      +

      Properties

      bucket: GridFSBucket
      bufToStore: Buffer

      Space used to store a chunk currently being inserted

      +

      A Collection instance where the file's chunks are stored

      +
      chunkSizeBytes: number

      The number of bytes that each chunk will be limited to

      +
      closed: boolean

      Is true after 'close' has been emitted.

      v18.0.0

      destroyed: boolean

      Is true after writable.destroy() has been called.

      v8.0.0

      done: boolean

      Indicates the stream is finished uploading

      -
      errored: null | Error

      Returns error if the stream has been destroyed with an error.

      +
      errored: null | Error

      Returns error if the stream has been destroyed with an error.

      v18.0.0

      filename: string

      The name of the file

      -

      A Collection instance where the file's GridFSFile document is stored

      -
      gridFSFile: null | GridFSFile = null

      The document containing information about the inserted file. +

      A Collection instance where the file's GridFSFile document is stored

      +
      gridFSFile: null | GridFSFile = null

      The document containing information about the inserted file. This property is defined after the finish event has been emitted. It will remain null if an error occurs.

      fs.createReadStream('file.txt')
      .pipe(bucket.openUploadStream('file.txt'))
      .on('finish', function () {
      console.log(this.gridFSFile)
      })
      -

      The ObjectId used for the _id field on the GridFSFile document

      -
      length: number

      Accumulates the number of bytes inserted as the stream uploads chunks

      -
      n: number

      Accumulates the number of chunks inserted as the stream uploads file contents

      -

      Options controlling the metadata inserted along with the file

      -
      pos: number

      Tracks the current offset into the buffered bytes being uploaded

      -
      state: {
          aborted: boolean;
          errored: boolean;
          outstandingRequests: number;
          streamEnd: boolean;
      }

      Contains a number of properties indicating the current state of the stream

      +

      The ObjectId used for the _id field on the GridFSFile document

      +
      length: number

      Accumulates the number of bytes inserted as the stream uploads chunks

      +
      n: number

      Accumulates the number of chunks inserted as the stream uploads file contents

      +

      Options controlling the metadata inserted along with the file

      +
      pos: number

      Tracks the current offset into the buffered bytes being uploaded

      +
      state: {
          aborted: boolean;
          errored: boolean;
          outstandingRequests: number;
          streamEnd: boolean;
      }

      Contains a number of properties indicating the current state of the stream

      Type declaration

      • aborted: boolean

        If set the stream was intentionally aborted

      • errored: boolean

        If set an error occurred during insertion

      • outstandingRequests: number

        Indicates the number of chunks that still need to be inserted to exhaust the current buffered data

      • streamEnd: boolean

        If set the stream has ended

        -
      writable: boolean

Is true if it is safe to call writable.write(), which means the stream has not been destroyed, errored, or ended.

+

    writable: boolean

    Is true if it is safe to call writable.write(), which means the stream has not been destroyed, errored, or ended.

    v11.4.0

    writableCorked: number

Number of times writable.uncork() needs to be called in order to fully uncork the stream.

@@ -116,7 +116,7 @@

    writableObjectMode: boolean

    Getter for the property objectMode of a given Writable stream.

    v12.3.0

    writeConcern?: WriteConcern

    The write concern setting to be used with every insert operation

    -
    captureRejections: boolean

    Value: boolean

    +
    captureRejections: boolean

    Value: boolean

    Change the default captureRejections option on all new EventEmitter objects.

    v13.4.0, v12.16.0

    captureRejectionSymbol: typeof captureRejectionSymbol

    Value: Symbol.for('nodejs.rejection')

    @@ -153,7 +153,7 @@

    v13.6.0, v12.17.0

    Methods

    • Parameters

      • error: null | Error
      • callback: ((error?: null | Error) => void)
          • (error?): void
          • Parameters

            • Optionalerror: null | Error

            Returns void

      Returns void

    • Parameters

      • chunks: {
            chunk: any;
            encoding: BufferEncoding;
        }[]
      • callback: ((error?: null | Error) => void)
          • (error?): void
          • Parameters

            • Optionalerror: null | Error

            Returns void

      Returns void

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    • Places this write stream into an aborted state (all future writes fail) and deletes all chunks that have already been written.

      -

      Returns Promise<void>
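A sketch of aborting an in-progress upload (bucket is assumed to be an existing GridFSBucket; error handling is minimal):

const uploadStream = bucket.openUploadStream('large-file.bin');
// Something went wrong on our side: abort the upload and clean up
// any chunks that were already written to the chunks collection.
await uploadStream.abort();
// All future writes to uploadStream will now fail.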

    Constructors

    Properties

    [asyncDispose]: (() => Promise<void>)

An alias for AbstractCursor.close().

    -
    filter: Document
    parent: Db
    captureRejections: boolean

    Value: boolean

    +
    filter: Document
    parent: Db
    captureRejections: boolean

    Value: boolean

    Change the default captureRejections option on all new EventEmitter objects.

    v13.4.0, v12.16.0

    captureRejectionSymbol: typeof captureRejectionSymbol

    Value: Symbol.for('nodejs.rejection')

    See how to write a custom rejection handler.

    v13.4.0, v12.16.0

    -
    CLOSE: "close" = ...
    defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single event.

+

CLOSE: "close" = ...
defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single event. This limit can be changed for individual EventEmitter instances using the emitter.setMaxListeners(n) method. To change the default for all EventEmitter instances, the events.defaultMaxListeners property can be used.

@@ -94,15 +94,15 @@

regular 'error' listener is installed.

    v13.6.0, v12.17.0

    Accessors

    • get closed(): boolean
    • The cursor is closed and all remaining locally buffered documents have been iterated.

      -

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      +

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      It is non-zero for as long as the database has an open cursor.

      The initiating command may receive a zero id if the entire result is in the firstBatch.

      -

      Returns undefined | Long

    • get killed(): boolean
    • A killCursors command was attempted on this cursor. This is performed if the cursor id is non zero.

      -

      Returns boolean

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    • Add a cursor flag to the cursor

      Parameters

      • flag:
            | "tailable"
            | "oplogReplay"
            | "noCursorTimeout"
            | "awaitData"
            | "exhaust"
            | "partial"

The flag to set; must be one of 'tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', or 'partial'.

      • value: boolean

        The flag boolean value.

        -

      Returns this
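A brief sketch, assuming collection is an existing Collection instance:

// Ask the server not to time out this cursor while the application is idle.
const cursor = collection.find({}).addCursorFlag('noCursorTimeout', true);
for await (const doc of cursor) {
  // process doc
}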

    • Alias for emitter.on(eventName, listener).

      +

    Returns this

• Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

+

    Returns this

    • Frees any client-side resources used by the cursor.

      +

      Parameters

      • Optionaloptions: {
            timeoutMS?: number;
        }
        • OptionaltimeoutMS?: number

      Returns Promise<void>
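A sketch of releasing cursor resources early (the 500 ms budget is arbitrary and only bounds the cleanup command, per the option shown above):

const cursor = collection.find({}, { batchSize: 10 });
const first = await cursor.next();
// We only needed one document; free the server-side cursor,
// giving the cleanup at most 500 ms to complete.
await cursor.close({ timeoutMS: 500 });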

    • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

      Returns true if the event had listeners, false otherwise.

      import { EventEmitter } from 'node:events';
      const myEmitter = new EventEmitter();

      // First listener
      myEmitter.on('event', function firstListener() {
      console.log('Helloooo! first listener');
      });
      // Second listener
      myEmitter.on('event', function secondListener(arg1, arg2) {
      console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
      });
      // Third listener
      myEmitter.on('event', function thirdListener(...args) {
      const parameters = args.join(', ');
      console.log(`event with parameters ${parameters} in third listener`);
      });

      console.log(myEmitter.listeners('event'));

      myEmitter.emit('event', 1, 2, 3, 4, 5);

      // Prints:
      // [
      // [Function: firstListener],
      // [Function: secondListener],
      // [Function: thirdListener]
      // ]
      // Helloooo! first listener
      // event with parameters 1, 2 in second listener
// event with parameters 1, 2, 3, 4, 5 in third listener

@@ -132,10 +132,10 @@
        • (doc): boolean | void
        • Parameters

          • doc: T

          Returns boolean | void

    Returns Promise<void>

    • Will be removed in a future release. Use for await...of instead.
    -
    • Returns the number of listeners listening for the event named eventName. If listener is provided, it will return how many times the listener is found in the list of the listeners of the event.

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns number

      v3.2.0

      @@ -165,10 +165,10 @@
      const cursor: FindCursor<Document> = coll.find();
      const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
      const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
      -
    • Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)

      Parameters

      • value: number

        Number of milliseconds to wait before aborting the query.

        -

      Returns this
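A sketch (collection is assumed to exist; the 2000 ms budget is arbitrary):

// Let the server abort the query if it takes longer than 2 seconds.
const docs = await collection
  .find({ status: 'active' })
  .maxTimeMS(2000)
  .toArray();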

    • Alias for emitter.removeListener().

      +

    Returns this

    • Removes all listeners, or those of the specified eventName.

      It is bad practice to remove listeners added elsewhere in the code, particularly when the EventEmitter instance was created by some other component or module (e.g. sockets or file streams).

      @@ -409,21 +409,21 @@
    • Rewind this cursor to its uninitialized state. Any options that are present on the cursor will remain in effect. Iterating this cursor will cause new queries to be sent to the server, even if the resultant data has already been retrieved by this cursor.

      -

      Returns void

    • By default EventEmitters will print a warning if more than 10 listeners are added for a particular event. This is a useful default that helps finding memory leaks. The emitter.setMaxListeners() method allows the limit to be modified for this specific EventEmitter instance. The value can be set to Infinity (or 0) to indicate an unlimited number of listeners.

      Returns a reference to the EventEmitter, so that calls can be chained.

      Parameters

      • n: number

      Returns this

      v0.3.5

      -
    • Returns an array of documents. The caller is responsible for making sure that there is enough memory to store the results. Note that the array only contains partial results when this cursor had been previously accessed. In that case, cursor.rewind() can be used to reset the cursor.

      -

      Returns Promise<T[]>
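A sketch of the rewind() behaviour mentioned above (collection is assumed to exist):

const cursor = collection.find({});
const firstDoc = await cursor.next();       // partially consumes the cursor
const rest = await cursor.toArray();        // only the remaining documents
cursor.rewind();                            // reset to the uninitialized state
const everything = await cursor.toArray();  // re-runs the query from the start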

    • Experimental

      Listens once to the abort event on the provided signal.

      +

    Returns this

    • Experimental

      Listens once to the abort event on the provided signal.

Listening to the abort event on abort signals is unsafe and may lead to resource leaks since another third party with the signal can call e.stopImmediatePropagation(). Unfortunately Node.js cannot change this.

diff --git a/docs/Next/classes/ListIndexesCursor.html b/docs/Next/classes/ListIndexesCursor.html index ecd4a561109..b586b8615b4 100644 --- a/docs/Next/classes/ListIndexesCursor.html +++ b/docs/Next/classes/ListIndexesCursor.html

@@ -56,14 +56,14 @@

on once setMaxListeners -

    Constructors

    Properties

    [asyncDispose]: (() => Promise<void>)

An alias for AbstractCursor.close().

    -
    captureRejections: boolean

    Value: boolean

    +

    Constructors

    Properties

    [asyncDispose]: (() => Promise<void>)

An alias for AbstractCursor.close().

    +
    captureRejections: boolean

    Value: boolean

    Change the default captureRejections option on all new EventEmitter objects.

    v13.4.0, v12.16.0

    captureRejectionSymbol: typeof captureRejectionSymbol

    Value: Symbol.for('nodejs.rejection')

    See how to write a custom rejection handler.

    v13.4.0, v12.16.0

    -
    CLOSE: "close" = ...
    defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single event.

+

CLOSE: "close" = ...
defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single event. This limit can be changed for individual EventEmitter instances using the emitter.setMaxListeners(n) method. To change the default for all EventEmitter instances, the events.defaultMaxListeners property can be used.

@@ -93,15 +93,15 @@

regular 'error' listener is installed.

    v13.6.0, v12.17.0

    Accessors

    • get closed(): boolean
    • The cursor is closed and all remaining locally buffered documents have been iterated.

      -

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      +

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      It is non-zero for as long as the database has an open cursor.

      The initiating command may receive a zero id if the entire result is in the firstBatch.

      -

      Returns undefined | Long

    • get killed(): boolean
    • A killCursors command was attempted on this cursor. This is performed if the cursor id is non zero.

      -

      Returns boolean

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    • Add a cursor flag to the cursor

      Parameters

      • flag:
            | "tailable"
            | "oplogReplay"
            | "noCursorTimeout"
            | "awaitData"
            | "exhaust"
            | "partial"

The flag to set; must be one of 'tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', or 'partial'.

      • value: boolean

        The flag boolean value.

        -

      Returns this

    • Alias for emitter.on(eventName, listener).

      +

    Returns this

• Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

+

    Returns this

    • Frees any client-side resources used by the cursor.

      +

      Parameters

      • Optionaloptions: {
            timeoutMS?: number;
        }
        • OptionaltimeoutMS?: number

      Returns Promise<void>

    • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

      Returns true if the event had listeners, false otherwise.

      import { EventEmitter } from 'node:events';
      const myEmitter = new EventEmitter();

      // First listener
      myEmitter.on('event', function firstListener() {
      console.log('Helloooo! first listener');
      });
      // Second listener
      myEmitter.on('event', function secondListener(arg1, arg2) {
      console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
      });
      // Third listener
      myEmitter.on('event', function thirdListener(...args) {
      const parameters = args.join(', ');
      console.log(`event with parameters ${parameters} in third listener`);
      });

      console.log(myEmitter.listeners('event'));

      myEmitter.emit('event', 1, 2, 3, 4, 5);

      // Prints:
      // [
      // [Function: firstListener],
      // [Function: secondListener],
      // [Function: thirdListener]
      // ]
      // Helloooo! first listener
      // event with parameters 1, 2 in second listener
// event with parameters 1, 2, 3, 4, 5 in third listener

@@ -131,10 +131,10 @@
        • (doc): boolean | void
        • Parameters

          • doc: any

          Returns boolean | void

    Returns Promise<void>

    • Will be removed in a future release. Use for await...of instead.
    -
    • Returns the number of listeners listening for the event named eventName. If listener is provided, it will return how many times the listener is found in the list of the listeners of the event.

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns number

      v3.2.0

      @@ -164,10 +164,10 @@
      const cursor: FindCursor<Document> = coll.find();
      const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
      const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
      -
    • Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)

      Parameters

      • value: number

        Number of milliseconds to wait before aborting the query.

        -

      Returns this

    • Alias for emitter.removeListener().

      +

    Returns this

    • Removes all listeners, or those of the specified eventName.

      It is bad practice to remove listeners added elsewhere in the code, particularly when the EventEmitter instance was created by some other component or module (e.g. sockets or file streams).

      @@ -408,21 +408,21 @@
    • Rewind this cursor to its uninitialized state. Any options that are present on the cursor will remain in effect. Iterating this cursor will cause new queries to be sent to the server, even if the resultant data has already been retrieved by this cursor.

      -

      Returns void

    • By default EventEmitters will print a warning if more than 10 listeners are added for a particular event. This is a useful default that helps finding memory leaks. The emitter.setMaxListeners() method allows the limit to be modified for this specific EventEmitter instance. The value can be set to Infinity (or 0) to indicate an unlimited number of listeners.

      Returns a reference to the EventEmitter, so that calls can be chained.

      Parameters

      • n: number

      Returns this

      v0.3.5

      -
    • Returns an array of documents. The caller is responsible for making sure that there is enough memory to store the results. Note that the array only contains partial results when this cursor had been previously accessed. In that case, cursor.rewind() can be used to reset the cursor.

      -

      Returns Promise<any[]>

    • Experimental

      Listens once to the abort event on the provided signal.

      +

    Returns this

    • Experimental

      Listens once to the abort event on the provided signal.

Listening to the abort event on abort signals is unsafe and may lead to resource leaks since another third party with the signal can call e.stopImmediatePropagation(). Unfortunately Node.js cannot change this.

diff --git a/docs/Next/classes/ListSearchIndexesCursor.html b/docs/Next/classes/ListSearchIndexesCursor.html index 944686853db..fb0c3faa58c 100644 --- a/docs/Next/classes/ListSearchIndexesCursor.html +++ b/docs/Next/classes/ListSearchIndexesCursor.html

@@ -50,6 +50,7 @@

redact removeAllListeners removeListener +resolveExplainTimeoutOptions rewind setMaxListeners skip

@@ -68,13 +69,13 @@

once setMaxListeners

    Properties

    [asyncDispose]: (() => Promise<void>)

An alias for AbstractCursor.close().

    -
    pipeline: Document[]
    captureRejections: boolean

    Value: boolean

    +
    pipeline: Document[]
    captureRejections: boolean

    Value: boolean

    Change the default captureRejections option on all new EventEmitter objects.

    v13.4.0, v12.16.0

    captureRejectionSymbol: typeof captureRejectionSymbol

    Value: Symbol.for('nodejs.rejection')

    See how to write a custom rejection handler.

    v13.4.0, v12.16.0

    -
    CLOSE: "close" = ...
    defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single event.

+

CLOSE: "close" = ...
defaultMaxListeners: number

By default, a maximum of 10 listeners can be registered for any single event. This limit can be changed for individual EventEmitter instances using the emitter.setMaxListeners(n) method. To change the default for all EventEmitter instances, the events.defaultMaxListeners property can be used.

@@ -104,15 +105,15 @@

regular 'error' listener is installed.

    v13.6.0, v12.17.0

    Accessors

    • get closed(): boolean
    • The cursor is closed and all remaining locally buffered documents have been iterated.

      -

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      +

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      It is non-zero for as long as the database has an open cursor.

      The initiating command may receive a zero id if the entire result is in the firstBatch.

      -

      Returns undefined | Long

    • get killed(): boolean
    • A killCursors command was attempted on this cursor. This is performed if the cursor id is non zero.

      -

      Returns boolean

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    • Add a cursor flag to the cursor

      Parameters

      • flag:
            | "tailable"
            | "oplogReplay"
            | "noCursorTimeout"
            | "awaitData"
            | "exhaust"
            | "partial"

The flag to set; must be one of 'tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', or 'partial'.

      • value: boolean

        The flag boolean value.

        -

      Returns this

    • Alias for emitter.on(eventName, listener).

      +

    Returns this

• Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

+

    Returns this

    • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

      Returns true if the event had listeners, false otherwise.

      import { EventEmitter } from 'node:events';
      const myEmitter = new EventEmitter();

      // First listener
      myEmitter.on('event', function firstListener() {
      console.log('Helloooo! first listener');
      });
      // Second listener
      myEmitter.on('event', function secondListener(arg1, arg2) {
      console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
      });
      // Third listener
      myEmitter.on('event', function thirdListener(...args) {
      const parameters = args.join(', ');
      console.log(`event with parameters ${parameters} in third listener`);
      });

      console.log(myEmitter.listeners('event'));

      myEmitter.emit('event', 1, 2, 3, 4, 5);

      // Prints:
      // [
      // [Function: firstListener],
      // [Function: secondListener],
      // [Function: thirdListener]
      // ]
      // Helloooo! first listener
      // event with parameters 1, 2 in second listener
// event with parameters 1, 2, 3, 4, 5 in third listener

@@ -143,20 +144,23 @@

      Returns string[]

      v6.0.0

      -
    • Iterates over all the documents for this cursor using the iterator, callback pattern.

      If the iterator returns false, iteration will stop.

      Parameters

      • iterator: ((doc: {
            name: string;
        }) => boolean | void)

        The iteration callback.

          • (doc): boolean | void
          • Parameters

            • doc: {
                  name: string;
              }
              • name: string

            Returns boolean | void

      Returns Promise<void>

      • Will be removed in a future release. Use for await...of instead.
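A sketch of the recommended for await...of replacement (collection is assumed to have Atlas Search indexes; the index name is a placeholder):

// Equivalent to the forEach pattern above, stopping early when a match is found.
for await (const index of collection.listSearchIndexes()) {
  console.log(index.name);
  if (index.name === 'default') break; // break instead of returning false
}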
      -
• Map all documents using the provided function. If there is a transform set on the cursor, that will be called first and the result passed to this function's transform.

      Type Parameters

      • T

      Parameters

      • transform: ((doc: {
            name: string;
        }) => T)

        The mapping transformation method.

        @@ -187,11 +191,11 @@
        const cursor: FindCursor<Document> = coll.find();
        const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
        const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
        -
    • Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)

      Parameters

      • value: number

        Number of milliseconds to wait before aborting the query.

        -

      Returns this

    • Get the next available document from the cursor, returns null if no more documents are available.

      -

      Returns Promise<null | {
          name: string;
      }>

    • Alias for emitter.removeListener().

      +

    Returns this

    • Get the next available document from the cursor, returns null if no more documents are available.

      +

      Returns Promise<null | {
          name: string;
      }>
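A sketch of draining the cursor with next() (the cursor comes from Collection.listSearchIndexes()):

const cursor = collection.listSearchIndexes();
let doc = await cursor.next();
while (doc !== null) {
  console.log(doc.name); // each document has the shape { name: string }
  doc = await cursor.next();
}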

    • Adds the listener function to the beginning of the listeners array for the event named eventName. No checks are made to see if the listener has already been added. Multiple calls passing the same combination of eventName and listener will result in the listener being added, and called, multiple times.

      @@ -347,15 +351,15 @@
      const cursor: AggregationCursor<{ a: number; b: string }> = coll.aggregate([]);
      const projectCursor = cursor.project<{ a: number }>({ _id: 0, a: true });
      const aPropOnlyArray: {a: number}[] = await projectCursor.toArray();

      // or always use chaining and save the final cursor

      const cursor = coll.aggregate().project<{ a: string }>({
      _id: 0,
      a: { $convert: { input: '$a', to: 'string' }
      }});
      -
    • Returns a copy of the array of listeners for the event named eventName, including any wrappers (such as those created by .once()).

      import { EventEmitter } from 'node:events';
      const emitter = new EventEmitter();
      emitter.once('log', () => console.log('log once'));

      // Returns a new Array with a function `onceWrapper` which has a property
      // `listener` which contains the original listener bound above
      const listeners = emitter.rawListeners('log');
      const logFnWrapper = listeners[0];

      // Logs "log once" to the console and does not unbind the `once` event
      logFnWrapper.listener();

      // Logs "log once" to the console and removes the listener
      logFnWrapper();

      emitter.on('log', () => console.log('log persistently'));
      // Will return a new Array with a single function bound by `.on()` above
      const newListeners = emitter.rawListeners('log');

      // Logs "log persistently" twice
      newListeners[0]();
      emitter.emit('log');

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns AbstractCursorEvents[EventKey][]

      v9.4.0

    • Rewind this cursor to its uninitialized state. Any options that are present on the cursor will remain in effect. Iterating this cursor will cause new queries to be sent to the server, even if the resultant data has already been retrieved by this cursor.

      -

      Returns void

    • By default EventEmitters will print a warning if more than 10 listeners are added for a particular event. This is a useful default that helps finding memory leaks. The emitter.setMaxListeners() method allows the limit to be modified for this specific EventEmitter instance. The value can be set to Infinity (or 0) to indicate an unlimited number of listeners.

      Returns a reference to the EventEmitter, so that calls can be chained.

      Parameters

      • n: number

      Returns this

      v0.3.5

    • Returns an array of documents. The caller is responsible for making sure that there is enough memory to store the results. Note that the array only contains partial results when this cursor had been previously accessed. In that case, cursor.rewind() can be used to reset the cursor.

      -

      Returns Promise<{
          name: string;
      }[]>

    • Experimental

      Listens once to the abort event on the provided signal.

      +

    Returns this

    • Experimental

      Listens once to the abort event on the provided signal.

Listening to the abort event on abort signals is unsafe and may lead to resource leaks since another third party with the signal can call e.stopImmediatePropagation(). Unfortunately Node.js cannot change this.

@@ -546,4 +550,4 @@

      Parameters

      • Optionaln: number

        A non-negative number. The maximum number of listeners per EventTarget event.

      • Rest...eventTargets: (EventEmitter<DefaultEventMap> | EventTarget)[]

      Returns void

      v15.4.0

      -
    +
    diff --git a/docs/Next/classes/MongoBulkWriteError.html b/docs/Next/classes/MongoBulkWriteError.html index 49d0c21951e..35a9b87ab93 100644 --- a/docs/Next/classes/MongoBulkWriteError.html +++ b/docs/Next/classes/MongoBulkWriteError.html @@ -1,5 +1,5 @@ MongoBulkWriteError | mongodb

    Class MongoBulkWriteError

    An error indicating an unsuccessful Bulk Write
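A sketch of handling this error (the duplicate _id values are deliberate to force a failure; collection is assumed to exist):

import { MongoBulkWriteError } from 'mongodb';

try {
  await collection.insertMany([{ _id: 1 }, { _id: 1 }], { ordered: false });
} catch (error) {
  if (error instanceof MongoBulkWriteError) {
    console.log(error.writeErrors); // the per-operation write errors
    console.log(error.insertedIds); // ids of the documents that did get inserted
  } else {
    throw error;
  }
}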

    -

    Hierarchy (view full)

    Constructors

    Hierarchy (view full)

    Constructors

    Properties

    cause? code? codeName? @@ -33,19 +33,19 @@

    Meant for internal use only.

    Parameters

    Returns MongoBulkWriteError

    This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

    -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    -
    codeName?: string
    connectionGeneration?: number
    errInfo?: Document
    errorResponse: ErrorDescription

    Raw error result document returned by server.

    -
    message: string
    ok?: number
    stack?: string
    topologyVersion?: TopologyVersion
    writeConcernError?: Document
    writeErrors: OneOrMore<WriteError> = []
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +
    codeName?: string
    connectionGeneration?: number
    errInfo?: Document
    errorResponse: ErrorDescription

    Raw error result document returned by server.

    +
    message: string
    ok?: number
    stack?: string
    topologyVersion?: TopologyVersion
    writeConcernError?: Document
    writeErrors: OneOrMore<WriteError> = []
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get insertedIds(): {
          [key: number]: any;
      }
    • Inserted document generated Id's, hash key is the index of the originating operation

      -

      Returns {
          [key: number]: any;
      }

      • [key: number]: any
    • get matchedCount(): number
    • Number of documents matched for update.

      -

      Returns number

    • get upsertedIds(): {
          [key: number]: any;
      }
    • Upserted document generated Id's, hash key is the index of the originating operation

      -

      Returns {
          [key: number]: any;
      }

      • [key: number]: any

    Methods

    • Checks the error to see if it has an error label

      +

      Returns number

    • get insertedIds(): {
          [key: number]: any;
      }
    • Inserted document generated Id's, hash key is the index of the originating operation

      +

      Returns {
          [key: number]: any;
      }

      • [key: number]: any
    • get matchedCount(): number
    • Number of documents matched for update.

      +

      Returns number

    • get upsertedIds(): {
          [key: number]: any;
      }
    • Upserted document generated Id's, hash key is the index of the originating operation

      +

      Returns {
          [key: number]: any;
      }

      • [key: number]: any

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label
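A sketch of checking an error label after a failed write (MongoError is the common base class; the single retry shown is only illustrative):

import { MongoError } from 'mongodb';

try {
  await collection.insertOne({ name: 'spot' });
} catch (error) {
  if (error instanceof MongoError && error.hasErrorLabel('RetryableWriteError')) {
    // the server indicates this write is safe to retry once
    await collection.insertOne({ name: 'spot' });
  } else {
    throw error;
  }
}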

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/MongoClient.html b/docs/Next/classes/MongoClient.html index 8dc4750e877..464d9bc1fc2 100644 --- a/docs/Next/classes/MongoClient.html +++ b/docs/Next/classes/MongoClient.html @@ -3,7 +3,7 @@
      import { MongoClient } from 'mongodb';

      // Enable command monitoring for debugging
      const client = new MongoClient('mongodb://localhost:27017', { monitorCommands: true });

      client.on('commandStarted', started => console.log(started));
const pets = client.db().collection('pets');
await pets.insertOne({ name: 'spot', kind: 'dog' });
      -

    Hierarchy (view full)

    Implements

    Constructors

    Hierarchy (view full)

    Implements

    Constructors

    Properties

    [asyncDispose]: (() => Promise<void>)

    An alias for MongoClient.close().

    -
    captureRejections: boolean

    Value: boolean

    +

    Constructors

    Properties

    [asyncDispose]: (() => Promise<void>)

    An alias for MongoClient.close().

    +
    captureRejections: boolean

    Value: boolean

    Change the default captureRejections option on all new EventEmitter objects.

    v13.4.0, v12.16.0

    captureRejectionSymbol: typeof captureRejectionSymbol

    v13.6.0, v12.17.0

    -

    Accessors

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    Accessors

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    • Alias for emitter.on(eventName, listener).

      Type Parameters

      • EventKey extends
            | "error"
            | "timeout"
            | "close"
            | "open"
            | "serverOpening"
            | "serverClosed"
            | "serverDescriptionChanged"
            | "topologyOpening"
            | "topologyClosed"
            | "topologyDescriptionChanged"
            | "connectionPoolCreated"
            | "connectionPoolClosed"
            | "connectionPoolCleared"
            | "connectionPoolReady"
            | "connectionCreated"
            | "connectionReady"
            | "connectionClosed"
            | "connectionCheckOutStarted"
            | "connectionCheckOutFailed"
            | "connectionCheckedOut"
            | "connectionCheckedIn"
            | "commandStarted"
            | "commandSucceeded"
            | "commandFailed"
            | "serverHeartbeatStarted"
            | "serverHeartbeatSucceeded"
            | "serverHeartbeatFailed"

      Parameters

      Returns this

      v0.1.26

    • Alias for emitter.on(eventName, listener).

      Parameters

      Returns this

      v0.1.26

      @@ -94,7 +95,7 @@

      Returns Promise<ClientBulkWriteResult>

      A ClientBulkWriteResult for acknowledged writes and ok: 1 for unacknowledged writes.

      -
• Cleans up client-side resources used by the MongoClient. This includes:

      +
+

• Create a new Db instance sharing the current socket connections.

      +
    • Connect to MongoDB using a url

      +

      Returns Promise<MongoClient>

Calling connect is optional since the first operation you perform will call connect if it's needed. timeoutMS will bound the time any operation can take before throwing a timeout error. However, when the operation being run is automatically connecting your MongoClient, the timeoutMS will not apply to the time taken to connect the MongoClient. This means the time to set up the MongoClient does not count against timeoutMS. If you are using timeoutMS, we recommend connecting your client explicitly in advance of any operation to avoid this inconsistent execution time.
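A sketch of connecting explicitly when timeoutMS is in use (the URI, names, and timeout value are placeholders):

const client = new MongoClient('mongodb://localhost:27017', { timeoutMS: 5000 });
// Connect up front so the handshake is not folded into the first operation,
// keeping each operation's timeoutMS budget predictable.
await client.connect();
try {
  await client.db('test').collection('items').findOne({});
} finally {
  await client.close();
}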

      +

      docs.mongodb.org/manual/reference/connection-string/

      +
    • Create a new Db instance sharing the current socket connections.

      Parameters

      • OptionaldbName: string

        The name of the database we want to use. If not provided, use database name from connection string.

      • Optionaloptions: DbOptions

        Optional settings for Db construction

        -

      Returns Db
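A sketch (the database and collection names are placeholders):

// Db instances share the client's connection pool; omitting the name
// falls back to the database named in the connection string.
const db = client.db('appData');
const users = db.collection('users');
await users.findOne({ email: 'a@example.com' });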

• Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

+

    Returns Db

    • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

      Returns true if the event had listeners, false otherwise.

      import { EventEmitter } from 'node:events';
      const myEmitter = new EventEmitter();

      // First listener
      myEmitter.on('event', function firstListener() {
      console.log('Helloooo! first listener');
      });
      // Second listener
      myEmitter.on('event', function secondListener(arg1, arg2) {
      console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
      });
      // Third listener
      myEmitter.on('event', function thirdListener(...args) {
      const parameters = args.join(', ');
      console.log(`event with parameters ${parameters} in third listener`);
      });

      console.log(myEmitter.listeners('event'));

      myEmitter.emit('event', 1, 2, 3, 4, 5);

      // Prints:
      // [
      // [Function: firstListener],
      // [Function: secondListener],
      // [Function: thirdListener]
      // ]
      // Helloooo! first listener
      // event with parameters 1, 2 in second listener
// event with parameters 1, 2, 3, 4, 5 in third listener

@@ -379,24 +385,36 @@
    • Creates a new ClientSession. When using the returned session in an operation a corresponding ServerSession will be created.

      -

      Parameters

      Returns ClientSession

A ClientSession instance may only be passed to operations being performed on the same MongoClient it was started from.

+

      Parameters

      Returns ClientSession

      A ClientSession instance may only be passed to operations being performed on the same MongoClient it was started from.

      -
    • Create a new Change Stream, watching for new changes (insertions, updates, replacements, deletions, and invalidations) in this cluster. Will ignore all changes to system collections, as well as the local, admin, and config databases.

      Type Parameters

      Parameters

      • pipeline: Document[] = []

        An array of pipeline stages through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents.

      • options: ChangeStreamOptions = {}

        Optional settings for the command

        -

      Returns ChangeStream<TSchema, TChange>

      watch() accepts two generic arguments for distinct use cases:

      +

    Returns ChangeStream<TSchema, TChange>

    watch() accepts two generic arguments for distinct use cases:

    • The first is to provide the schema that may be defined for all the data within the current cluster
• The second is to override the shape of the change stream document entirely; if it is not provided, the type will default to ChangeStreamDocument of the first argument
    -
• A convenience method for creating and handling the cleanup of a ClientSession.

+

In iterator mode, if a next() call throws a timeout error, the driver will attempt to resume the change stream. The next call can simply be retried after the resume succeeds.

      +
      const changeStream = collection.watch([], { timeoutMS: 100 });
      try {
      await changeStream.next();
      } catch (e) {
      if (e instanceof MongoOperationTimeoutError && !changeStream.closed) {
      await changeStream.next();
      }
      throw e;
}

In emitter mode, if the change stream goes timeoutMS without emitting a change event, it will emit an error event that returns a MongoOperationTimeoutError, but will not close the change stream unless the resume attempt fails. There is no need to re-establish change listeners, as the stream will automatically continue emitting change events once the resume attempt completes.

      +
      const changeStream = collection.watch([], { timeoutMS: 100 });
      changeStream.on('change', console.log);
      changeStream.on('error', e => {
      if (e instanceof MongoOperationTimeoutError && !changeStream.closed) {
      // do nothing
      } else {
      changeStream.close();
      }
});
• A convenience method for creating and handling the cleanup of a ClientSession. The session will always be ended when the executor finishes.

      Type Parameters

      • T = any

      Parameters

      • executor: WithSessionCallback<T>

        An executor function that all operations using the provided session must be invoked in

        -

      Returns Promise<T>
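A sketch, assuming client is a connected MongoClient and the database and collection names are placeholders:

// The session is created before the executor runs and always ended afterwards,
// even if the executor throws.
const count = await client.withSession(async session =>
  client.db('test').collection('items').countDocuments({}, { session })
);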

    • Type Parameters

      • T = any

      Parameters

      Returns Promise<T>

    • Experimental

      Listens once to the abort event on the provided signal.

      +

    Returns Promise<T>

  • Type Parameters

    • T = any

    Parameters

    Returns Promise<T>

    • Connect to MongoDB using a url

      -

      Parameters

      Returns Promise<MongoClient>

      The programmatically provided options take precedence over the URI options.

      +

      Parameters

      Returns Promise<MongoClient>

Calling connect is optional since the first operation you perform will call connect if it's needed. timeoutMS will bound the time any operation can take before throwing a timeout error. However, when the operation being run is automatically connecting your MongoClient, the timeoutMS will not apply to the time taken to connect the MongoClient. This means the time to set up the MongoClient does not count against timeoutMS. If you are using timeoutMS, we recommend connecting your client explicitly in advance of any operation to avoid this inconsistent execution time.

      https://www.mongodb.com/docs/manual/reference/connection-string/

      -
    • Returns a copy of the array of listeners for the event named eventName.

      +
    • Returns a copy of the array of listeners for the event named eventName.

      For EventEmitters this behaves exactly the same as calling .listeners on the emitter.

      For EventTargets this is the only way to get the event listeners for the @@ -483,4 +505,4 @@

      Parameters

      • Optionaln: number

        A non-negative number. The maximum number of listeners per EventTarget event.

      • Rest...eventTargets: (EventEmitter<DefaultEventMap> | EventTarget)[]

      Returns void

      v15.4.0

      -
    +
    diff --git a/docs/Next/classes/MongoCompatibilityError.html b/docs/Next/classes/MongoCompatibilityError.html index 48e3dc3e957..226ef2d62d1 100644 --- a/docs/Next/classes/MongoCompatibilityError.html +++ b/docs/Next/classes/MongoCompatibilityError.html @@ -1,6 +1,6 @@ MongoCompatibilityError | mongodb

    Class MongoCompatibilityError

    An error generated when a feature that is not enabled or allowed for the current server configuration is used

    -

    Hierarchy (view full)

    Constructors

    Hierarchy (view full)

    Constructors

    Properties

    cause? code? connectionGeneration? @@ -19,11 +19,11 @@

    Meant for internal use only.

    Parameters

    • message: string

    Returns MongoCompatibilityError

    This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

    -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/MongoCursorExhaustedError.html b/docs/Next/classes/MongoCursorExhaustedError.html index 34fa4140c6a..231e1c58111 100644 --- a/docs/Next/classes/MongoCursorExhaustedError.html +++ b/docs/Next/classes/MongoCursorExhaustedError.html @@ -1,5 +1,5 @@ MongoCursorExhaustedError | mongodb

      Class MongoCursorExhaustedError

      An error thrown when an attempt is made to read from a cursor that has been exhausted

      -

      Hierarchy (view full)

      Constructors

      Hierarchy (view full)

      Constructors

      Properties

      cause? code? connectionGeneration? @@ -18,11 +18,11 @@

      Meant for internal use only.

      Parameters

      • Optionalmessage: string

      Returns MongoCursorExhaustedError

      This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

      -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/MongoCursorInUseError.html b/docs/Next/classes/MongoCursorInUseError.html index c42fcb2c693..0c989131334 100644 --- a/docs/Next/classes/MongoCursorInUseError.html +++ b/docs/Next/classes/MongoCursorInUseError.html @@ -1,6 +1,6 @@ MongoCursorInUseError | mongodb

      Class MongoCursorInUseError

      An error thrown when the user attempts to add options to a cursor that has already been initialized

      -

      Hierarchy (view full)

      Constructors

      Hierarchy (view full)

      Constructors

      Properties

      cause? code? connectionGeneration? @@ -19,11 +19,11 @@

      Meant for internal use only.

      Parameters

      • message: string = 'Cursor is already initialized'

      Returns MongoCursorInUseError

      This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

      -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    Constructors

    Properties

    collection: string

    collection name

    +
    db: string

    database name

    +

    Methods

    diff --git a/docs/Next/classes/MongoDBNamespace.html b/docs/Next/classes/MongoDBNamespace.html index 9da28b16879..0656f27379b 100644 --- a/docs/Next/classes/MongoDBNamespace.html +++ b/docs/Next/classes/MongoDBNamespace.html @@ -1,4 +1,4 @@ -MongoDBNamespace | mongodb

    Class MongoDBNamespace

    Hierarchy (view full)

    Constructors

    constructor +MongoDBNamespace | mongodb

    Class MongoDBNamespace

    Hierarchy (view full)

    Constructors

    Properties

    Methods

    toString @@ -7,6 +7,6 @@

    Constructors

    Properties

    collection?: string

    collection name

    -
    db: string

    database name

    -

    Methods

    +

    Returns MongoDBNamespace

    Properties

    collection?: string

    collection name

    +
    db: string

    database name

    +

    Methods

    diff --git a/docs/Next/classes/MongoDriverError.html b/docs/Next/classes/MongoDriverError.html index 94ca1d35b1a..83d2c2f793e 100644 --- a/docs/Next/classes/MongoDriverError.html +++ b/docs/Next/classes/MongoDriverError.html @@ -1,5 +1,5 @@ MongoDriverError | mongodb

    Class MongoDriverError

    An error generated by the driver

    -

    Hierarchy (view full)

    Constructors

    Hierarchy (view full)

    Constructors

    Properties

    cause? code? connectionGeneration? diff --git a/docs/Next/classes/MongoInvalidArgumentError.html b/docs/Next/classes/MongoInvalidArgumentError.html index cc5851334b3..ff29da4019f 100644 --- a/docs/Next/classes/MongoInvalidArgumentError.html +++ b/docs/Next/classes/MongoInvalidArgumentError.html @@ -1,6 +1,6 @@ MongoInvalidArgumentError | mongodb

    Class MongoInvalidArgumentError

    An error generated when the user supplies malformed or unexpected arguments or when a required argument or field is not provided.

    -

    Hierarchy (view full)

    Constructors

    Hierarchy (view full)

    Constructors

    Properties

    cause? code? connectionGeneration? @@ -19,11 +19,11 @@

    Meant for internal use only.

    Parameters

    • message: string
    • Optionaloptions: {
          cause?: Error;
      }
      • Optionalcause?: Error

    Returns MongoInvalidArgumentError

    This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

    -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/MongoMissingCredentialsError.html b/docs/Next/classes/MongoMissingCredentialsError.html index 346c3b48ca4..bd71fc28f7e 100644 --- a/docs/Next/classes/MongoMissingCredentialsError.html +++ b/docs/Next/classes/MongoMissingCredentialsError.html @@ -1,6 +1,6 @@ MongoMissingCredentialsError | mongodb

      Class MongoMissingCredentialsError

      An error generated when the user fails to provide authentication credentials before attempting to connect to a mongo server instance.

      -

      Hierarchy (view full)

      Constructors

      Hierarchy (view full)

      Constructors

      Properties

      cause? code? connectionGeneration? @@ -19,11 +19,11 @@

      Meant for internal use only.

      Parameters

      • message: string

      Returns MongoMissingCredentialsError

      This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

      -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/MongoMissingDependencyError.html b/docs/Next/classes/MongoMissingDependencyError.html index 227337e45c3..045dbe36f71 100644 --- a/docs/Next/classes/MongoMissingDependencyError.html +++ b/docs/Next/classes/MongoMissingDependencyError.html @@ -1,5 +1,5 @@ MongoMissingDependencyError | mongodb

      Class MongoMissingDependencyError

      An error generated when a required module or dependency is not present in the local environment

      -

      Hierarchy (view full)

      Constructors

      Hierarchy (view full)

      Constructors

      Properties

      cause code? connectionGeneration? @@ -19,12 +19,12 @@

      Meant for internal use only.

      Parameters

      • message: string
      • options: {
            cause: Error;
            dependencyName: string;
        }
        • cause: Error
        • dependencyName: string

      Returns MongoMissingDependencyError

      This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

      -

    Properties

    cause: Error

    This property is assigned in the Error constructor.

    -
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    -
    connectionGeneration?: number
    dependencyName: string
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    +

    Properties

    cause: Error

    This property is assigned in the Error constructor.

    +
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +
    connectionGeneration?: number
    dependencyName: string
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/MongoNetworkError.html b/docs/Next/classes/MongoNetworkError.html index fd90fdc5e84..77f49a7dc1f 100644 --- a/docs/Next/classes/MongoNetworkError.html +++ b/docs/Next/classes/MongoNetworkError.html @@ -1,5 +1,5 @@ MongoNetworkError | mongodb

      Class MongoNetworkError

      An error indicating an issue with the network, including TCP errors and timeouts.

      -

      Hierarchy (view full)

      Constructors

      Hierarchy (view full)

      Constructors

      Properties

      cause? code? connectionGeneration? @@ -18,11 +18,11 @@

      Meant for internal use only.

      Parameters

      Returns MongoNetworkError

      This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

      -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/MongoNetworkTimeoutError.html b/docs/Next/classes/MongoNetworkTimeoutError.html index 84dc66fbab0..4e10159bc29 100644 --- a/docs/Next/classes/MongoNetworkTimeoutError.html +++ b/docs/Next/classes/MongoNetworkTimeoutError.html @@ -1,5 +1,5 @@ MongoNetworkTimeoutError | mongodb

      Class MongoNetworkTimeoutError

      An error indicating a network timeout occurred

      -

      Hierarchy (view full)

      Constructors

      Hierarchy (view full)

      Constructors

      Properties

      cause? code? connectionGeneration? @@ -18,11 +18,11 @@

      Meant for internal use only.

      Parameters

      Returns MongoNetworkTimeoutError

      This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

      -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/MongoServerSelectionError.html b/docs/Next/classes/MongoServerSelectionError.html index 80c929facf9..3382f4e261d 100644 --- a/docs/Next/classes/MongoServerSelectionError.html +++ b/docs/Next/classes/MongoServerSelectionError.html @@ -1,5 +1,5 @@ MongoServerSelectionError | mongodb

      Class MongoServerSelectionError

      An error signifying a client-side server selection error

      -

      Hierarchy (view full)

      Constructors

      Hierarchy (view full)

      Constructors

      Properties

      cause? code? connectionGeneration? @@ -19,12 +19,12 @@

      Meant for internal use only.

      Parameters

      Returns MongoServerSelectionError

      This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

      -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string

    An optional reason context, such as an error saved during flow of monitoring and selecting servers

    -
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    +
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string

    An optional reason context, such as an error saved during flow of monitoring and selecting servers

    -
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    +
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/MongoTopologyClosedError.html b/docs/Next/classes/MongoTopologyClosedError.html index cf3fe80c4ef..5d12aa6b96b 100644 --- a/docs/Next/classes/MongoTopologyClosedError.html +++ b/docs/Next/classes/MongoTopologyClosedError.html @@ -1,6 +1,6 @@ MongoTopologyClosedError | mongodb

      Class MongoTopologyClosedError

      An error generated when an attempt is made to operate on a dropped, or otherwise unavailable, database.

      -

      Hierarchy (view full)

      Constructors

      Hierarchy (view full)

      Constructors

      Properties

      cause? code? connectionGeneration? @@ -19,11 +19,11 @@

      Meant for internal use only.

      Parameters

      • message: string = 'Topology is closed'

      Returns MongoTopologyClosedError

      This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

      -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    connectionGeneration?: number
    message: string
    stack?: string
    topologyVersion?: TopologyVersion
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/MongoWriteConcernError.html b/docs/Next/classes/MongoWriteConcernError.html index fed650b1f7c..220098a593f 100644 --- a/docs/Next/classes/MongoWriteConcernError.html +++ b/docs/Next/classes/MongoWriteConcernError.html @@ -1,5 +1,5 @@ MongoWriteConcernError | mongodb

      Class MongoWriteConcernError

      An error thrown when the server reports a writeConcernError
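      For example, a hedged sketch of catching this error and inspecting the server's response (the write concern values are deliberately unsatisfiable and purely illustrative; MongoWriteConcernError is assumed to be imported from 'mongodb'):

      try {
        await collection.insertOne({ a: 1 }, { writeConcern: { w: 50, wtimeoutMS: 1000 } });
      } catch (error) {
        if (error instanceof MongoWriteConcernError) {
          console.log(error.result);            // raw error result document returned by the server
          console.log(error.writeConcernError); // the writeConcernError sub-document, if present
        }
      }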

      -

      Hierarchy (view full)

      Constructors

      Hierarchy (view full)

      Constructors

      Properties

      cause? code? codeName? @@ -24,13 +24,13 @@

      Meant for internal use only.

      Parameters

      Returns MongoWriteConcernError

      This class is only meant to be constructed within the driver. This constructor is not subject to semantic versioning compatibility guarantees and may change at any time.

      -

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    +

    Properties

    cause?: Error
    code?: string | number

    This is a number in MongoServerError and a string in MongoDriverError

    codeName?: string
    connectionGeneration?: number
    errInfo?: Document
    errorResponse: ErrorDescription

    Raw error result document returned by server.

    message: string
    ok?: number
    result: Document

    The result document

    -
    stack?: string
    topologyVersion?: TopologyVersion
    writeConcernError?: Document
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    +
    stack?: string
    topologyVersion?: TopologyVersion
    writeConcernError?: Document
    prepareStackTrace?: ((err: Error, stackTraces: CallSite[]) => any)

    Optional override for formatting stack traces

    stackTraceLimit: number

    Accessors

    • get errmsg(): string
    • Legacy name for server error responses

      -

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      +

      Returns string

    • get errorLabels(): string[]
    • Returns string[]

    • get name(): string
    • Returns string

    Methods

    • Checks the error to see if it has an error label

      Parameters

      • label: string

        The error label to check for

      Returns boolean

      returns true if the error has the provided error label

    • Create .stack property on a target object

      diff --git a/docs/Next/classes/OrderedBulkOperation.html b/docs/Next/classes/OrderedBulkOperation.html index 313e3c6b3e8..9d131063987 100644 --- a/docs/Next/classes/OrderedBulkOperation.html +++ b/docs/Next/classes/OrderedBulkOperation.html @@ -9,14 +9,14 @@ find insert raw -

    Properties

    isOrdered: boolean
    operationId?: number

    Accessors

    Methods

    • Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne. +

    Properties

    isOrdered: boolean
    operationId?: number

    Accessors

    Methods

    • Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne. Returns a builder object used to complete the definition of the operation.

      Parameters

      Returns FindOperators

      const bulkOp = collection.initializeOrderedBulkOp();

      // Add an updateOne to the bulkOp
      bulkOp.find({ a: 1 }).updateOne({ $set: { b: 2 } });

      // Add an updateMany to the bulkOp
      bulkOp.find({ c: 3 }).update({ $set: { d: 4 } });

      // Add an upsert
      bulkOp.find({ e: 5 }).upsert().updateOne({ $set: { f: 6 } });

      // Add a deletion
      bulkOp.find({ g: 7 }).deleteOne();

      // Add a multi deletion
      bulkOp.find({ h: 8 }).delete();

      // Add a replaceOne
      bulkOp.find({ i: 9 }).replaceOne({writeConcern: { j: 10 }});

      // Update using a pipeline (requires MongoDB 4.2 or higher)
      bulkOp.find({ k: 11, y: { $exists: true }, z: { $exists: true } }).updateOne([
      { $set: { total: { $sum: [ '$y', '$z' ] } } }
      ]);

      // All of the ops will now be executed
      await bulkOp.execute();
      -
    +
    diff --git a/docs/Next/classes/RunCommandCursor.html b/docs/Next/classes/RunCommandCursor.html index 89ba86ff581..848e1742350 100644 --- a/docs/Next/classes/RunCommandCursor.html +++ b/docs/Next/classes/RunCommandCursor.html @@ -1,4 +1,4 @@ -RunCommandCursor | mongodb

    Class RunCommandCursor

    Hierarchy (view full)

    Properties

    [asyncDispose] +RunCommandCursor | mongodb

    Class RunCommandCursor

    Hierarchy (view full)

    Properties

    [asyncDispose]: (() => Promise<void>)

    An alias for AbstractCursor.close|AbstractCursor.close().

    -
    command: Readonly<Record<string, any>>
    getMoreOptions: {
        batchSize?: number;
        comment?: any;
        maxAwaitTimeMS?: number;
    } = {}
    captureRejections: boolean

    Value: boolean

    +
    command: Readonly<Record<string, any>>
    getMoreOptions: {
        batchSize?: number;
        comment?: any;
        maxAwaitTimeMS?: number;
    } = {}
    captureRejections: boolean

    Value: boolean

    Change the default captureRejections option on all new EventEmitter objects.

    v13.4.0, v12.16.0

    captureRejectionSymbol: typeof captureRejectionSymbol

    Value: Symbol.for('nodejs.rejection')

    See how to write a custom rejection handler.

    v13.4.0, v12.16.0

    -
    CLOSE: "close" = ...
    defaultMaxListeners: number

    By default, a maximum of 10 listeners can be registered for any single +

    CLOSE: "close" = ...
    defaultMaxListeners: number

    By default, a maximum of 10 listeners can be registered for any single event. This limit can be changed for individual EventEmitter instances using the emitter.setMaxListeners(n) method. To change the default for allEventEmitter instances, the events.defaultMaxListeners property @@ -95,23 +95,23 @@ regular 'error' listener is installed.

    v13.6.0, v12.17.0

    Accessors

    • get closed(): boolean
    • The cursor is closed and all remaining locally buffered documents have been iterated.

      -

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      +

      Returns boolean

    • get id(): undefined | Long
    • The cursor has no id until it receives a response from the initial cursor creating command.

      It is non-zero for as long as the database has an open cursor.

      The initiating command may receive a zero id if the entire result is in the firstBatch.

      -

      Returns undefined | Long

    • get killed(): boolean
    • A killCursors command was attempted on this cursor. This is performed if the cursor id is non zero.

      -

      Returns boolean

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    Methods

    • Type Parameters

      • K

      Parameters

      • error: Error
      • event: string | symbol
      • Rest...args: AnyRest

      Returns void

    • Frees any client-side resources used by the cursor.

      +

      Parameters

      • Optionaloptions: {
            timeoutMS?: number;
        }
        • OptionaltimeoutMS?: number

      Returns Promise<void>

    • Synchronously calls each of the listeners registered for the event named eventName, in the order they were registered, passing the supplied arguments to each.

      Returns true if the event had listeners, false otherwise.

      import { EventEmitter } from 'node:events';
      const myEmitter = new EventEmitter();

      // First listener
      myEmitter.on('event', function firstListener() {
      console.log('Helloooo! first listener');
      });
      // Second listener
      myEmitter.on('event', function secondListener(arg1, arg2) {
      console.log(`event with parameters ${arg1}, ${arg2} in second listener`);
      });
      // Third listener
      myEmitter.on('event', function thirdListener(...args) {
      const parameters = args.join(', ');
      console.log(`event with parameters ${parameters} in third listener`);
      });

      console.log(myEmitter.listeners('event'));

      myEmitter.emit('event', 1, 2, 3, 4, 5);

      // Prints:
      // [
      // [Function: firstListener],
      // [Function: secondListener],
      // [Function: thirdListener]
      // ]
      // Helloooo! first listener
      // event with parameters 1, 2 in second listener
      // event with parameters 1, 2, 3, 4, 5 in third listener @@ -130,10 +130,10 @@
        • (doc): boolean | void
        • Parameters

          • doc: any

          Returns boolean | void

    Returns Promise<void>

    • Will be removed in a future release. Use for await...of instead.
    -
    • Returns the number of listeners listening for the event named eventName. If listener is provided, it will return how many times the listener is found in the list of the listeners of the event.

      Type Parameters

      • EventKey extends "close"

      Parameters

      Returns number

      v3.2.0

      @@ -163,9 +163,9 @@
      const cursor: FindCursor<Document> = coll.find();
      const mappedCursor: FindCursor<number> = cursor.map(doc => Object.keys(doc).length);
      const keyCounts: number[] = await mappedCursor.toArray(); // cursor.toArray() still returns Document[]
      -
    • Removes all listeners, or those of the specified eventName.

      It is bad practice to remove listeners added elsewhere in the code, particularly when the EventEmitter instance was created by some other component or module (e.g. sockets or file streams).

      @@ -406,10 +406,11 @@
    • Rewind this cursor to its uninitialized state. Any options that are present on the cursor will remain in effect. Iterating this cursor will cause new queries to be sent to the server, even if the resultant data has already been retrieved by this cursor.

      -

      Returns void

    • Controls the getMore.batchSize field

      +

      Parameters

      • batchSize: number

        the number of documents to return in the nextBatch

        +

      Returns this
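      As a rough sketch, assuming the cursor was obtained from db.runCursorCommand (which returns a RunCommandCursor) and that the command returns a cursor:

      const cursor = db.runCursorCommand({ listCollections: 1 });
      cursor.setBatchSize(100); // each getMore will request at most 100 documents
      for await (const doc of cursor) {
        console.log(doc.name);
      }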

    • By default EventEmitters will print a warning if more than 10 listeners are +

    Returns this

    • By default EventEmitters will print a warning if more than 10 listeners are added for a particular event. This is a useful default that helps finding memory leaks. The emitter.setMaxListeners() method allows the limit to be modified for this specific EventEmitter instance. The value can be set to Infinity (or 0) to indicate an unlimited number of listeners.

      @@ -417,15 +418,15 @@
    • Controls the getMore.maxTimeMS field. Only valid when the cursor is a tailable await cursor

      Parameters

      • maxTimeMS: number

        the number of milliseconds to wait for new data

        -

      Returns this

    • Returns an array of documents. The caller is responsible for making sure that there +

    Returns this

    • Returns an array of documents. The caller is responsible for making sure that there is enough memory to store the results. Note that the array only contains partial results when this cursor had been previously accessed. In that case, cursor.rewind() can be used to reset the cursor.

      -

      Returns Promise<any[]>

    • Experimental

      Listens once to the abort event on the provided signal.

      +

    Returns this

    Methods

    Properties

    $clusterTime?: ClusterTime
    address: string
    arbiters: string[]
    electionId: null | ObjectId
    error: null | MongoError
    hosts: string[]
    lastUpdateTime: number
    lastWriteDate: number
    logicalSessionTimeoutMinutes: null | number
    maxBsonObjectSize: null | number

    The max bson object size.

    +

    Properties

    $clusterTime?: ClusterTime
    address: string
    arbiters: string[]
    electionId: null | ObjectId
    error: null | MongoError
    hosts: string[]
    iscryptd: boolean

    Indicates server is a mongocryptd instance.

    +
    lastUpdateTime: number
    lastWriteDate: number
    logicalSessionTimeoutMinutes: null | number
    maxBsonObjectSize: null | number

    The max bson object size.

    maxMessageSizeBytes: null | number

    The max message size in bytes for the server.

    maxWireVersion: number
    maxWriteBatchSize: null | number

    The max number of writes in a bulk write command.

    me: null | string
    minRoundTripTime: number

    The minimum measurement of the last 10 measurements of roundTripTime that have been collected

    -
    minWireVersion: number
    passives: string[]
    primary: null | string
    roundTripTime: number
    setName: null | string
    setVersion: null | number
    tags: TagSet
    topologyVersion: null | TopologyVersion

    Accessors

    Methods

    • Determines if another ServerDescription is equal to this one per the rules defined in the SDAM specification.

      +
    minWireVersion: number
    passives: string[]
    primary: null | string
    roundTripTime: number
    setName: null | string
    setVersion: null | number
    tags: TagSet
    topologyVersion: null | TopologyVersion

    Accessors

    Methods

    +
    diff --git a/docs/Next/classes/ServerSession.html b/docs/Next/classes/ServerSession.html index a41cd5d958a..327f7a9ecf3 100644 --- a/docs/Next/classes/ServerSession.html +++ b/docs/Next/classes/ServerSession.html @@ -1,10 +1,10 @@ ServerSession | mongodb

    Class ServerSession

    Reflects the existence of a session on the server. Can be reused by the session pool. WARNING: not meant to be instantiated directly. For internal use only.

    -

    Properties

    id +

    Properties

    isDirty: boolean
    lastUse: number
    txnNumber: number

    Methods

    • Determines if the server session has timed out.

      +

    Properties

    isDirty: boolean
    lastUse: number
    txnNumber: number

    Methods

    • Determines if the server session has timed out.

      Parameters

      • sessionTimeoutMinutes: number

        The server's "logicalSessionTimeoutMinutes"

        -

      Returns boolean

    +

    Returns boolean

    diff --git a/docs/Next/classes/UnorderedBulkOperation.html b/docs/Next/classes/UnorderedBulkOperation.html index a35c5c69e8e..076d1d752be 100644 --- a/docs/Next/classes/UnorderedBulkOperation.html +++ b/docs/Next/classes/UnorderedBulkOperation.html @@ -9,14 +9,14 @@ find insert raw -

    Properties

    isOrdered: boolean
    operationId?: number

    Accessors

    Methods

    • Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne. +

    Properties

    isOrdered: boolean
    operationId?: number

    Accessors

    Methods

    • Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne. Returns a builder object used to complete the definition of the operation.

      Parameters

      Returns FindOperators

      const bulkOp = collection.initializeOrderedBulkOp();

      // Add an updateOne to the bulkOp
      bulkOp.find({ a: 1 }).updateOne({ $set: { b: 2 } });

      // Add an updateMany to the bulkOp
      bulkOp.find({ c: 3 }).update({ $set: { d: 4 } });

      // Add an upsert
      bulkOp.find({ e: 5 }).upsert().updateOne({ $set: { f: 6 } });

      // Add a deletion
      bulkOp.find({ g: 7 }).deleteOne();

      // Add a multi deletion
      bulkOp.find({ h: 8 }).delete();

      // Add a replaceOne
      bulkOp.find({ i: 9 }).replaceOne({writeConcern: { j: 10 }});

      // Update using a pipeline (requires MongoDB 4.2 or higher)
      bulkOp.find({ k: 11, y: { $exists: true }, z: { $exists: true } }).updateOne([
      { $set: { total: { $sum: [ '$y', '$z' ] } } }
      ]);

      // All of the ops will now be executed
      await bulkOp.execute();
      -
    +
    diff --git a/docs/Next/classes/WriteConcern.html b/docs/Next/classes/WriteConcern.html index 6d3c64292c5..9a3c475c23d 100644 --- a/docs/Next/classes/WriteConcern.html +++ b/docs/Next/classes/WriteConcern.html @@ -1,7 +1,7 @@ WriteConcern | mongodb

    Class WriteConcern

    A MongoDB WriteConcern, which describes the level of acknowledgement requested from MongoDB for write operations.

    Constructors

    Constructors

    Properties

    fsync? j? journal? @@ -15,16 +15,16 @@
  • OptionalwtimeoutMS: number

    specify a time limit to prevent write operations from blocking indefinitely

  • Optionaljournal: boolean

    request acknowledgment that the write operation has been written to the on-disk journal

  • Optionalfsync: boolean | 1

    Equivalent to the j option. Deprecated and will be removed in the next major version.

    -
  • Returns WriteConcern

    Properties

    fsync?: boolean | 1

    Equivalent to the j option.

    +

    Returns WriteConcern
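    For example, a minimal sketch of constructing a write concern and applying it to a single write (the values are illustrative):

    const writeConcern = new WriteConcern('majority', 5000, true);
    await collection.insertOne({ a: 1 }, { writeConcern });

    // Equivalent, passing plain write concern settings on the operation:
    await collection.insertOne({ a: 1 }, { writeConcern: { w: 'majority', wtimeoutMS: 5000, journal: true } });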

    Properties

    fsync?: boolean | 1

    Equivalent to the j option.

    Will be removed in the next major version. Please use journal.

    -
    j?: boolean

    Request acknowledgment that the write operation has been written to the on-disk journal.

    +
    j?: boolean

    Request acknowledgment that the write operation has been written to the on-disk journal.

    Will be removed in the next major version. Please use journal.

    -
    journal?: boolean

    Request acknowledgment that the write operation has been written to the on-disk journal

    -
    w?: W

    Request acknowledgment that the write operation has propagated to a specified number of mongod instances or to mongod instances with specified tags. +

    journal?: boolean

    Request acknowledgment that the write operation has been written to the on-disk journal

    +
    w?: W

    Request acknowledgment that the write operation has propagated to a specified number of mongod instances or to mongod instances with specified tags. If w is 0 and is set on a write operation, the server will not send a response.

    -
    wtimeout?: number

    Specify a time limit to prevent write operations from blocking indefinitely.

    +
    wtimeout?: number

    Specify a time limit to prevent write operations from blocking indefinitely.

    Will be removed in the next major version. Please use wtimeoutMS.

    -
    wtimeoutMS?: number

    Specify a time limit to prevent write operations from blocking indefinitely

    -

    Methods

    +
    wtimeoutMS?: number

    Specify a time limit to prevent write operations from blocking indefinitely.

    +

    Methods

    diff --git a/docs/Next/classes/WriteConcernError.html b/docs/Next/classes/WriteConcernError.html index f9054492b75..6501193bd75 100644 --- a/docs/Next/classes/WriteConcernError.html +++ b/docs/Next/classes/WriteConcernError.html @@ -1,11 +1,11 @@ WriteConcernError | mongodb

    Class WriteConcernError

    An error representing a failure by the server to apply the requested write concern to the bulk operation.

    -

    Constructors

    Constructors

    Accessors

    Methods

    Constructors

    Accessors

    • get code(): undefined | number
    • Write concern error code.

      -

      Returns undefined | number

    • get errmsg(): undefined | string
    • Write concern error message.

      -

      Returns undefined | string

    Methods

    +

    Constructors

    Accessors

    • get code(): undefined | number
    • Write concern error code.

      +

      Returns undefined | number

    • get errmsg(): undefined | string
    • Write concern error message.

      +

      Returns undefined | string

    Methods

    diff --git a/docs/Next/classes/WriteError.html b/docs/Next/classes/WriteError.html index 8f3e45cb663..7582ca27081 100644 --- a/docs/Next/classes/WriteError.html +++ b/docs/Next/classes/WriteError.html @@ -1,5 +1,5 @@ WriteError | mongodb

    Class WriteError

    An error that occurred during a BulkWrite on the server.

    -

    Constructors

    Constructors

    Properties

    Accessors

    code errInfo @@ -8,9 +8,9 @@

    Methods

    Constructors

    Properties

    Accessors

    • get errmsg(): undefined | string
    • WriteError message.

      -

      Returns undefined | string

    Methods

    • Returns {
          code: number;
          errmsg?: string;
          index: number;
          op: Document;
      }

      • code: number
      • Optionalerrmsg?: string
      • index: number
      • op: Document
    +

    Constructors

    Properties

    Accessors

    • get errmsg(): undefined | string
    • WriteError message.

      +

      Returns undefined | string

    Methods

    • Returns {
          code: number;
          errmsg?: string;
          index: number;
          op: Document;
      }

      • code: number
      • Optionalerrmsg?: string
      • index: number
      • op: Document
    diff --git a/docs/Next/hierarchy.html b/docs/Next/hierarchy.html index 18ba8babe9e..4b8457220ca 100644 --- a/docs/Next/hierarchy.html +++ b/docs/Next/hierarchy.html @@ -1 +1 @@ -mongodb

    mongodb

    Class Hierarchy

    +mongodb

    mongodb

    Class Hierarchy

    diff --git a/docs/Next/interfaces/AWSEncryptionKeyOptions.html b/docs/Next/interfaces/AWSEncryptionKeyOptions.html index 76d2265ce68..8b1ce5d1318 100644 --- a/docs/Next/interfaces/AWSEncryptionKeyOptions.html +++ b/docs/Next/interfaces/AWSEncryptionKeyOptions.html @@ -1,8 +1,8 @@ AWSEncryptionKeyOptions | mongodb

    Interface AWSEncryptionKeyOptions

    Configuration options for making an AWS encryption key

    -
    interface AWSEncryptionKeyOptions {
        endpoint?: string;
        key: string;
        region: string;
    }

    Properties

    interface AWSEncryptionKeyOptions {
        endpoint?: string;
        key: string;
        region: string;
    }

    Properties

    Properties

    endpoint?: string

    An alternate host to send KMS requests to. May include port number.

    -
    key: string

    The Amazon Resource Name (ARN) to the AWS customer master key (CMK)

    -
    region: string

    The AWS region of the KMS

    -
    +
    key: string

    The Amazon Resource Name (ARN) to the AWS customer master key (CMK)

    +
    region: string

    The AWS region of the KMS

    +
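    A hedged sketch of how these options are typically supplied as the masterKey when creating an AWS data key with ClientEncryption.createDataKey (the region, ARN, and endpoint are placeholders, and clientEncryption is assumed to be an already configured ClientEncryption instance):

    const keyId = await clientEncryption.createDataKey('aws', {
      masterKey: {
        region: 'us-east-1',
        key: 'arn:aws:kms:us-east-1:123456789012:key/example-key-id',
        endpoint: 'kms.us-east-1.amazonaws.com', // optional alternate KMS host
      },
    });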
    diff --git a/docs/Next/interfaces/AbstractCursorOptions.html b/docs/Next/interfaces/AbstractCursorOptions.html index 43bc0230d6f..540d858985c 100644 --- a/docs/Next/interfaces/AbstractCursorOptions.html +++ b/docs/Next/interfaces/AbstractCursorOptions.html @@ -1,4 +1,4 @@ -AbstractCursorOptions | mongodb

    Interface AbstractCursorOptions

    interface AbstractCursorOptions {
        awaitData?: boolean;
        batchSize?: number;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        comment?: unknown;
        enableUtf8Validation?: boolean;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noCursorTimeout?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        serializeFunctions?: boolean;
        session?: ClientSession;
        tailable?: boolean;
        useBigInt64?: boolean;
    }

    Hierarchy (view full)

    Properties

    awaitData? +AbstractCursorOptions | mongodb

    Interface AbstractCursorOptions

    interface AbstractCursorOptions {
        awaitData?: boolean;
        batchSize?: number;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        comment?: unknown;
        enableUtf8Validation?: boolean;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noCursorTimeout?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        serializeFunctions?: boolean;
        session?: ClientSession;
        tailable?: boolean;
        timeoutMode?: CursorTimeoutMode;
        timeoutMS?: number;
        useBigInt64?: boolean;
    }

    Hierarchy (view full)

    Properties

    awaitData?: boolean

    If awaitData is set to true, when the cursor reaches the end of the capped collection, MongoDB blocks the query thread for a period of time waiting for new data to arrive. When new data is inserted into the capped collection, the blocked thread is signaled to wake up and return the next batch to the client.
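    A minimal sketch of a tailable await cursor on a capped collection (the collection and timing values are illustrative):

    const cursor = cappedCollection.find({}, {
      tailable: true,
      awaitData: true,
      maxAwaitTimeMS: 500, // wait up to 500ms for new documents on each getMore
    });
    for await (const doc of cursor) {
      console.log(doc);
    }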

    -
    batchSize?: number

    Specifies the number of documents to return in each response from MongoDB

    -
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +
    batchSize?: number

    Specifies the number of documents to return in each response from MongoDB

    +
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    @@ -32,7 +34,7 @@

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    fieldsAsRaw?: Document

    Allows specifying which fields to return as unserialized raw buffers.

    null

    ignoreUndefined?: boolean

    serialize will not emit undefined fields @@ -40,9 +42,9 @@

    true

    maxAwaitTimeMS?: number

    When applicable maxAwaitTimeMS controls the amount of time subsequent getMores that a cursor uses to fetch more data should take. (ex. cursor.next())

    -
    maxTimeMS?: number

    When applicable maxTimeMS controls the amount of time the initial command +

    maxTimeMS?: number

    When applicable maxTimeMS controls the amount of time the initial command that constructs a cursor should take. (ex. find, aggregate, listCollections)

    -
    noCursorTimeout?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    noCursorTimeout?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -59,12 +61,27 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    -
    readConcern?: ReadConcernLike
    readPreference?: ReadPreferenceLike
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readConcern?: ReadConcernLike
    readPreference?: ReadPreferenceLike
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    -
    session?: ClientSession
    tailable?: boolean

    By default, MongoDB will automatically close a cursor when the +

    session?: ClientSession
    tailable?: boolean

    By default, MongoDB will automatically close a cursor when the client has exhausted all results in the cursor. However, for capped collections you may use a Tailable Cursor that remains open after the client exhausts the results in the initial cursor.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMode?: CursorTimeoutMode

    Specifies how timeoutMS is applied to the cursor. Can be either 'cursorLifetime' or 'iteration'. When set to 'iteration', the deadline specified by timeoutMS applies to each call of cursor.next(). When set to 'cursorLifetime', the deadline applies to the life of the entire cursor.

    +

    Depending on the type of cursor being used, this option has different default values. For non-tailable cursors, this value defaults to 'cursorLifetime'. For tailable cursors, this value defaults to 'iteration', since tailable cursors by definition can have an arbitrarily long lifetime.

    +
    const cursor = collection.find({}, {timeoutMS: 100, timeoutMode: 'iteration'});
    for await (const doc of cursor) {
    // process doc
    // This will throw a timeout error if any of the iterator's `next()` calls takes more than 100ms, but
    // will continue to iterate successfully otherwise, regardless of the number of batches.
    }
    +
    const cursor = collection.find({}, { timeoutMS: 1000, timeoutMode: 'cursorLifetime' });
    const docs = await cursor.toArray(); // This entire line will throw a timeout error if all batches are not fetched and returned within 1000ms.
    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error. See AbstractCursorOptions.timeoutMode for more details on how this option applies to cursors.

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    +
    diff --git a/docs/Next/interfaces/AggregateOptions.html b/docs/Next/interfaces/AggregateOptions.html index 29119d6a9b1..2e86700538f 100644 --- a/docs/Next/interfaces/AggregateOptions.html +++ b/docs/Next/interfaces/AggregateOptions.html @@ -1,4 +1,4 @@ -AggregateOptions | mongodb

    Interface AggregateOptions

    interface AggregateOptions {
        allowDiskUse?: boolean;
        authdb?: string;
        batchSize?: number;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        cursor?: Document;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Hint;
        ignoreUndefined?: boolean;
        let?: Document;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        out?: string;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    allowDiskUse? +AggregateOptions | mongodb

    Interface AggregateOptions

    interface AggregateOptions {
        allowDiskUse?: boolean;
        authdb?: string;
        batchSize?: number;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        cursor?: Document;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Hint;
        ignoreUndefined?: boolean;
        let?: Document;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        out?: string;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    allowDiskUse?: boolean

    allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires MongoDB 2.6 or higher).

    -
    authdb?: string
    batchSize?: number

    The number of documents to return per batch. See aggregation documentation.

    -
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +
    authdb?: string
    batchSize?: number

    The number of documents to return per batch. See aggregation documentation.

    +
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    bypassDocumentValidation?: boolean

    Allow driver to bypass schema validation.

    -
    checkKeys?: boolean

    the serializer will check if keys are valid.

    +
    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Specify collation.

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    cursor?: Document

    Return the query as a cursor; on MongoDB 2.6 and above a real cursor is returned, while on pre-2.6 servers an emulated cursor is returned.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    cursor?: Document

    Return the query as a cursor; on MongoDB 2.6 and above a real cursor is returned, while on pre-2.6 servers an emulated cursor is returned.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    This API is deprecated in favor of collection.aggregate().explain() or db.aggregate().explain().

    -
    fieldsAsRaw?: Document

    Allows specifying which fields to return as unserialized raw buffers.

    +
    fieldsAsRaw?: Document

    Allows specifying which fields to return as unserialized raw buffers.

    null

    hint?: Hint

    Add an index selection hint to an aggregation command

    -
    ignoreUndefined?: boolean

    serialize will not emit undefined fields +

    ignoreUndefined?: boolean

    serialize will not emit undefined fields note that the driver sets this to false

    true

    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).
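    A hedged sketch of combining let with a few of the other options listed here (the pipeline, variable name, and hint are illustrative):

    const docs = await collection.aggregate(
      [{ $match: { $expr: { $gte: ['$qty', '$$minQty'] } } }],
      {
        let: { minQty: 10 },  // referenced as $$minQty in the pipeline (MongoDB 5.0+)
        allowDiskUse: true,   // permit temporary disk use for large stages
        hint: { qty: 1 },     // suggest an index for the aggregation
      },
    ).toArray();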

    -
    maxAwaitTimeMS?: number

    The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query.

    -
    maxTimeMS?: number

    specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.

    -
    noResponse?: boolean
    omitReadPreference?: boolean
    out?: string
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxAwaitTimeMS?: number

    The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query.

    +
    maxTimeMS?: number

    Specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    out?: string
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -75,12 +76,13 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error
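    For example, a minimal sketch (assuming a driver version in which aggregate accepts the timeoutMS option listed above):

    const pipeline = [{ $match: { status: 'active' } }];
    // Throws a timeout error if the aggregation does not complete within 2 seconds.
    const results = await collection.aggregate(pipeline, { timeoutMS: 2000 }).toArray();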

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
    diff --git a/docs/Next/interfaces/AggregationCursorOptions.html b/docs/Next/interfaces/AggregationCursorOptions.html index b427ef8a9ce..e08949e787f 100644 --- a/docs/Next/interfaces/AggregationCursorOptions.html +++ b/docs/Next/interfaces/AggregationCursorOptions.html @@ -1,4 +1,4 @@ -AggregationCursorOptions | mongodb

    Interface AggregationCursorOptions

    interface AggregationCursorOptions {
        allowDiskUse?: boolean;
        authdb?: string;
        awaitData?: boolean;
        batchSize?: number;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        cursor?: Document;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Hint;
        ignoreUndefined?: boolean;
        let?: Document;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noCursorTimeout?: boolean;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        out?: string;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        tailable?: boolean;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    allowDiskUse? +AggregationCursorOptions | mongodb

    Interface AggregationCursorOptions

    interface AggregationCursorOptions {
        allowDiskUse?: boolean;
        authdb?: string;
        awaitData?: boolean;
        batchSize?: number;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        cursor?: Document;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Hint;
        ignoreUndefined?: boolean;
        let?: Document;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noCursorTimeout?: boolean;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        out?: string;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        tailable?: boolean;
        timeoutMode?: CursorTimeoutMode;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    allowDiskUse?: boolean

    allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires MongoDB 2.6 or higher).

    -
    authdb?: string
    awaitData?: boolean

    If awaitData is set to true, when the cursor reaches the end of the capped collection, +

    authdb?: string
    awaitData?: boolean

    If awaitData is set to true, when the cursor reaches the end of the capped collection, MongoDB blocks the query thread for a period of time waiting for new data to arrive. When new data is inserted into the capped collection, the blocked thread is signaled to wake up and return the next batch to the client.

    -
    batchSize?: number

    Specifies the number of documents to return in each response from MongoDB

    -
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +
    batchSize?: number

    Specifies the number of documents to return in each response from MongoDB

    +
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    bypassDocumentValidation?: boolean

    Allow driver to bypass schema validation.

    -
    checkKeys?: boolean

    the serializer will check if keys are valid.

    +
    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Specify collation.

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

cursor?: Document

Return the query as a cursor. On MongoDB 2.6 and later it returns a real cursor; on pre-2.6 servers it returns an emulated cursor.

dbName?: string
enableUtf8Validation?: boolean

Enable utf8 validation when deserializing BSON documents. Defaults to true.

explain?: ExplainVerbosityLike | ExplainCommandOptions

    Specifies the verbosity mode for the explain output.

    This API is deprecated in favor of collection.aggregate().explain() or db.aggregate().explain().
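A sketch of the preferred cursor-based API mentioned above (collection name and pipeline are placeholders):

// Ask the server how it would execute the aggregation instead of passing the deprecated option.
const explanation = await orders.aggregate(pipeline).explain('queryPlanner');
console.log(explanation);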

fieldsAsRaw?: Document

allows specifying which fields should be returned as unserialized raw buffers.

    null

    hint?: Hint

    Add an index selection hint to an aggregation command

ignoreUndefined?: boolean

serialize will not emit undefined fields; note that the driver sets this to false

    true

    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).
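A brief sketch of binding a variable and referencing it from the pipeline with $$ (collection, names, and values are illustrative):

// $$minAmount resolves to the value supplied via the let option.
const bigOrders = await orders.aggregate(
    [{ $match: { $expr: { $gte: ['$amount', '$$minAmount'] } } }],
    { let: { minAmount: 100 } }
).toArray();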

maxAwaitTimeMS?: number

When applicable, maxAwaitTimeMS controls the amount of time that the subsequent getMore operations a cursor issues to fetch more data should take. (e.g. cursor.next())

maxTimeMS?: number

When applicable, maxTimeMS controls the amount of time the initial command that constructs a cursor should take. (e.g. find, aggregate, listCollections)
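For example, a hedged sketch of bounding the initial aggregate command server-side (collection, pipeline, and the 5-second value are illustrative):

// The server aborts the aggregate command if it runs longer than 5 seconds.
const cursor = orders.aggregate(pipeline, { maxTimeMS: 5000 });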

noCursorTimeout?: boolean
noResponse?: boolean
omitReadPreference?: boolean
out?: string
promoteBuffers?: boolean

when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does work correctly at the Db, Collection, and per-operation level, the same as other BSON options.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

readPreference?: ReadPreferenceLike

The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

retryWrites?: boolean

Should retry failed writes

serializeFunctions?: boolean

serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

tailable?: boolean

By default, MongoDB will automatically close a cursor when the client has exhausted all results in the cursor. However, for capped collections you may use a Tailable Cursor that remains open after the client exhausts the results in the initial cursor.
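A sketch of tailing a capped collection with this option together with awaitData (the events collection is a placeholder and must be capped):

// The cursor stays open after the initial results; each getMore waits up to 1s for new documents.
const tail = events.find({}, { tailable: true, awaitData: true, maxAwaitTimeMS: 1000 });
for await (const doc of tail) {
    console.log('new event:', doc);
}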

timeoutMode?: CursorTimeoutMode

Specifies how timeoutMS is applied to the cursor. Can be either 'cursorLifetime' or 'iteration'. When set to 'iteration', the deadline specified by timeoutMS applies to each call of cursor.next(). When set to 'cursorLifetime', the deadline applies to the life of the entire cursor.

Depending on the type of cursor being used, this option has different default values. For non-tailable cursors, this value defaults to 'cursorLifetime'. For tailable cursors, this value defaults to 'iteration', since tailable cursors, by definition, can have an arbitrarily long lifetime.

const cursor = collection.find({}, { timeoutMS: 100, timeoutMode: 'iteration' });
for await (const doc of cursor) {
    // process doc
    // This will throw a timeout error if any of the iterator's `next()` calls takes more than 100ms, but
    // will continue to iterate successfully otherwise, regardless of the number of batches.
}

const cursor = collection.find({}, { timeoutMS: 1000, timeoutMode: 'cursorLifetime' });
const docs = await cursor.toArray(); // This entire line will throw a timeout error if all batches are not fetched and returned within 1000ms.

timeoutMS?: number

Specifies the time an operation will run until it throws a timeout error. See AbstractCursorOptions.timeoutMode for more details on how this option applies to cursors.

useBigInt64?: boolean

when deserializing a Long return as a BigInt.

    false

willRetryWrite?: boolean
writeConcern?: WriteConcern | WriteConcernSettings

Write Concern as an object
    diff --git a/docs/Next/interfaces/AzureEncryptionKeyOptions.html b/docs/Next/interfaces/AzureEncryptionKeyOptions.html index eb2b8b23a89..13a5c5d082e 100644 --- a/docs/Next/interfaces/AzureEncryptionKeyOptions.html +++ b/docs/Next/interfaces/AzureEncryptionKeyOptions.html @@ -1,8 +1,8 @@ AzureEncryptionKeyOptions | mongodb

    Interface AzureEncryptionKeyOptions

    Configuration options for making an Azure encryption key

interface AzureEncryptionKeyOptions {
    keyName: string;
    keyVaultEndpoint: string;
    keyVersion?: string;
}

Properties

keyName: string

Key name

keyVaultEndpoint: string

Key vault URL, typically <name>.vault.azure.net

keyVersion?: string

Key version
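For context, a hedged sketch of where these fields are used, assuming a ClientEncryption instance named clientEncryption has already been constructed (the key and vault names are placeholders):

// Creates a data key whose key material is wrapped by the named Azure Key Vault key.
const dataKeyId = await clientEncryption.createDataKey('azure', {
    masterKey: {
        keyName: 'example-key',
        keyVaultEndpoint: 'example-vault.vault.azure.net',
        // keyVersion: '0123456789abcdef',   // optional: pin a specific key version
    },
});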
    diff --git a/docs/Next/interfaces/BulkWriteOperationError.html b/docs/Next/interfaces/BulkWriteOperationError.html index 6d8c1c2f7c2..4a47b40fee6 100644 --- a/docs/Next/interfaces/BulkWriteOperationError.html +++ b/docs/Next/interfaces/BulkWriteOperationError.html @@ -1,6 +1,6 @@ -BulkWriteOperationError | mongodb

    Interface BulkWriteOperationError

interface BulkWriteOperationError {
    code: number;
    errInfo: Document;
    errmsg: string;
    index: number;
    op: Document | DeleteStatement | UpdateStatement;
}

Properties

code: number
errInfo: Document
errmsg: string
index: number
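These fields surface on the client through MongoBulkWriteError; a minimal sketch of inspecting them, assuming a collection named orders and an operations array are in scope:

import { MongoBulkWriteError } from 'mongodb';

try {
    await orders.bulkWrite(operations, { ordered: false });
} catch (error) {
    if (error instanceof MongoBulkWriteError) {
        // Each write error carries the fields documented above (index, code, errmsg, op).
        console.log(error.writeErrors);
    }
}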
    diff --git a/docs/Next/interfaces/BulkWriteOptions.html b/docs/Next/interfaces/BulkWriteOptions.html index 69b6d9fa3a9..a6ad82e2fd4 100644 --- a/docs/Next/interfaces/BulkWriteOptions.html +++ b/docs/Next/interfaces/BulkWriteOptions.html @@ -1,4 +1,4 @@ -BulkWriteOptions | mongodb

    Interface BulkWriteOptions


    interface BulkWriteOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        forceServerObjectId?: boolean;
        ignoreUndefined?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        ordered?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.


    false

    bypassDocumentValidation?: boolean

    Allow driver to bypass schema validation.

    false - documents will be validated by default

checkKeys?: boolean

the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

comment?: unknown

Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

dbName?: string
enableUtf8Validation?: boolean

Enable utf8 validation when deserializing BSON documents. Defaults to true.

explain?: ExplainVerbosityLike | ExplainCommandOptions

    Specifies the verbosity mode for the explain output.

fieldsAsRaw?: Document

allows specifying which fields should be returned as unserialized raw buffers.

    null

    forceServerObjectId?: boolean

    Force server to assign _id values instead of driver.

    false - the driver generates _id fields by default

ignoreUndefined?: boolean

serialize will not emit undefined fields; note that the driver sets this to false

    true

    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

maxTimeMS?: number

maxTimeMS is a server-side time limit in milliseconds for processing an operation.

noResponse?: boolean
omitReadPreference?: boolean
ordered?: boolean

If true, when an insert fails, don't execute the remaining writes. If false, continue with remaining inserts when one fails.

    true - inserts are ordered by default
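A short sketch of the unordered behaviour described above (collection name and document contents are illustrative):

try {
    await orders.bulkWrite(
        [
            { insertOne: { document: { _id: 1 } } },
            { insertOne: { document: { _id: 1 } } }, // duplicate _id: this single write fails
            { insertOne: { document: { _id: 2 } } }, // still attempted because ordered is false
        ],
        { ordered: false }
    );
} catch (error) {
    // The call still rejects with a MongoBulkWriteError summarizing the failed write.
}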

promoteBuffers?: boolean

when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does work correctly at the Db, Collection, and per-operation level, the same as other BSON options.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

readPreference?: ReadPreferenceLike

The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

retryWrites?: boolean

Should retry failed writes

serializeFunctions?: boolean

serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

timeoutMS?: number

Specifies the time an operation will run until it throws a timeout error

useBigInt64?: boolean

when deserializing a Long return as a BigInt.

    false

willRetryWrite?: boolean
writeConcern?: WriteConcern | WriteConcernSettings

Write Concern as an object
    diff --git a/docs/Next/interfaces/ChangeStreamCollModDocument.html b/docs/Next/interfaces/ChangeStreamCollModDocument.html index 13f1a1e584c..d01afe92842 100644 --- a/docs/Next/interfaces/ChangeStreamCollModDocument.html +++ b/docs/Next/interfaces/ChangeStreamCollModDocument.html @@ -1,6 +1,6 @@ ChangeStreamCollModDocument | mongodb

    Interface ChangeStreamCollModDocument

    Only present when the showExpandedEvents flag is enabled.

    interface ChangeStreamCollModDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        operationType: "modify";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -
    operationType: "modify"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    operationType: "modify"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamCreateDocument.html b/docs/Next/interfaces/ChangeStreamCreateDocument.html index d8da7dbcbe9..9bd7664a975 100644 --- a/docs/Next/interfaces/ChangeStreamCreateDocument.html +++ b/docs/Next/interfaces/ChangeStreamCreateDocument.html @@ -1,5 +1,5 @@ ChangeStreamCreateDocument | mongodb

    Interface ChangeStreamCreateDocument

    interface ChangeStreamCreateDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        operationType: "create";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -
    operationType: "create"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    operationType: "create"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamCreateIndexDocument.html b/docs/Next/interfaces/ChangeStreamCreateIndexDocument.html index beeec6accf4..b10ac15b1ec 100644 --- a/docs/Next/interfaces/ChangeStreamCreateIndexDocument.html +++ b/docs/Next/interfaces/ChangeStreamCreateIndexDocument.html @@ -1,6 +1,6 @@ ChangeStreamCreateIndexDocument | mongodb

    Interface ChangeStreamCreateIndexDocument

    Only present when the showExpandedEvents flag is enabled.

    interface ChangeStreamCreateIndexDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        operationDescription?: Document;
        operationType: "createIndexes";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

operationDescription?: Document

A description of the operation.

    Only present when the showExpandedEvents flag is enabled.

    6.1.0

    -
    operationType: "createIndexes"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    operationType: "createIndexes"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamDeleteDocument.html b/docs/Next/interfaces/ChangeStreamDeleteDocument.html index 394cc2b4150..f6c1d3a7595 100644 --- a/docs/Next/interfaces/ChangeStreamDeleteDocument.html +++ b/docs/Next/interfaces/ChangeStreamDeleteDocument.html @@ -1,5 +1,5 @@ ChangeStreamDeleteDocument | mongodb

    Interface ChangeStreamDeleteDocument<TSchema>

    interface ChangeStreamDeleteDocument<TSchema> {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        documentKey: {
            _id: InferIdType<TSchema>;
            [shardKey: string]: any;
        };
        fullDocumentBeforeChange?: TSchema;
        lsid?: ServerSessionId;
        ns: ChangeStreamNameSpace;
        operationType: "delete";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Type Parameters

    Hierarchy (view full)

Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -
    documentKey: {
        _id: InferIdType<TSchema>;
        [shardKey: string]: any;
    }

    For unsharded collections this contains a single field _id. +

    documentKey: {
        _id: InferIdType<TSchema>;
        [shardKey: string]: any;
    }

    For unsharded collections this contains a single field _id. For sharded collections, this will contain all the components of the shard key

    -
    fullDocumentBeforeChange?: TSchema

    Contains the pre-image of the modified or deleted document if the +

    fullDocumentBeforeChange?: TSchema

    Contains the pre-image of the modified or deleted document if the pre-image is available for the change event and either 'required' or 'whenAvailable' was specified for the 'fullDocumentBeforeChange' option when creating the change stream. If 'whenAvailable' was specified but the pre-image is unavailable, this will be explicitly set to null.
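A hedged sketch of requesting pre-images so that this field is populated (the collection is a placeholder and must have pre-images enabled server-side):

// 'whenAvailable' delivers the pre-image when it exists and null otherwise.
const changeStream = collection.watch([], { fullDocumentBeforeChange: 'whenAvailable' });
for await (const change of changeStream) {
    if (change.operationType === 'delete') {
        console.log('deleted document was:', change.fullDocumentBeforeChange);
    }
}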

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -

    Namespace the delete event occurred on

    -
    operationType: "delete"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    Namespace the delete event occurred on

    +
    operationType: "delete"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamDocumentCollectionUUID.html b/docs/Next/interfaces/ChangeStreamDocumentCollectionUUID.html index ca7c8c3dac7..122ad87d67b 100644 --- a/docs/Next/interfaces/ChangeStreamDocumentCollectionUUID.html +++ b/docs/Next/interfaces/ChangeStreamDocumentCollectionUUID.html @@ -1,7 +1,7 @@ -ChangeStreamDocumentCollectionUUID | mongodb

    Interface ChangeStreamDocumentCollectionUUID

interface ChangeStreamDocumentCollectionUUID {
    collectionUUID: Binary;
}

Hierarchy (view full)

Properties

collectionUUID: Binary

The UUID (Binary subtype 4) of the collection that the operation was performed on.

Only present when the showExpandedEvents flag is enabled.

NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

6.1.0
    diff --git a/docs/Next/interfaces/ChangeStreamDocumentCommon.html b/docs/Next/interfaces/ChangeStreamDocumentCommon.html index 17902de0178..59c37363ae4 100644 --- a/docs/Next/interfaces/ChangeStreamDocumentCommon.html +++ b/docs/Next/interfaces/ChangeStreamDocumentCommon.html @@ -1,22 +1,22 @@ -ChangeStreamDocumentCommon | mongodb

    Interface ChangeStreamDocumentCommon

interface ChangeStreamDocumentCommon {
    _id: unknown;
    clusterTime?: Timestamp;
    lsid?: ServerSessionId;
    splitEvent?: ChangeStreamSplitEvent;
    txnNumber?: number;
}

Hierarchy (view full)

Properties

_id: unknown

The id functions as an opaque token for use when resuming an interrupted change stream.

clusterTime?: Timestamp

The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

lsid?: ServerSessionId

The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

splitEvent?: ChangeStreamSplitEvent

When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

txnNumber?: number

The transaction number. Only present if the operation is part of a multi-document transaction.

NOTE: txnNumber can be a Long if promoteLongs is set to false
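As a sketch of how the _id resume token is typically used (collection and variable names are placeholders):

// Remember the _id of each event so the stream can be resumed after a disconnect.
let lastResumeToken: unknown;
const changeStream = collection.watch();
changeStream.on('change', (change) => {
    lastResumeToken = change._id;
});

// Later, after reconnecting:
const resumed = collection.watch([], { resumeAfter: lastResumeToken });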
    diff --git a/docs/Next/interfaces/ChangeStreamDocumentKey.html b/docs/Next/interfaces/ChangeStreamDocumentKey.html index ddce969df6b..027f1501bb8 100644 --- a/docs/Next/interfaces/ChangeStreamDocumentKey.html +++ b/docs/Next/interfaces/ChangeStreamDocumentKey.html @@ -1,4 +1,4 @@ -ChangeStreamDocumentKey | mongodb

    Interface ChangeStreamDocumentKey<TSchema>

interface ChangeStreamDocumentKey<TSchema> {
    documentKey: {
        _id: InferIdType<TSchema>;
        [shardKey: string]: any;
    };
}

Type Parameters

Hierarchy (view full)

Properties

documentKey: {
    _id: InferIdType<TSchema>;
    [shardKey: string]: any;
}

For unsharded collections this contains a single field _id. For sharded collections, this will contain all the components of the shard key
    diff --git a/docs/Next/interfaces/ChangeStreamDocumentOperationDescription.html b/docs/Next/interfaces/ChangeStreamDocumentOperationDescription.html index 8e95ee188de..0dd53bbdbbe 100644 --- a/docs/Next/interfaces/ChangeStreamDocumentOperationDescription.html +++ b/docs/Next/interfaces/ChangeStreamDocumentOperationDescription.html @@ -1,5 +1,5 @@ -ChangeStreamDocumentOperationDescription | mongodb

    Interface ChangeStreamDocumentOperationDescription

interface ChangeStreamDocumentOperationDescription {
    operationDescription?: Document;
}

Hierarchy (view full)

Properties

operationDescription?: Document

A description of the operation.

Only present when the showExpandedEvents flag is enabled.

6.1.0
    diff --git a/docs/Next/interfaces/ChangeStreamDropDatabaseDocument.html b/docs/Next/interfaces/ChangeStreamDropDatabaseDocument.html index 51f1fe7054d..fa69af8b17a 100644 --- a/docs/Next/interfaces/ChangeStreamDropDatabaseDocument.html +++ b/docs/Next/interfaces/ChangeStreamDropDatabaseDocument.html @@ -1,5 +1,5 @@ ChangeStreamDropDatabaseDocument | mongodb

    Interface ChangeStreamDropDatabaseDocument

    interface ChangeStreamDropDatabaseDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        lsid?: ServerSessionId;
        ns: {
            db: string;
        };
        operationType: "dropDatabase";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -
    ns: {
        db: string;
    }

    The database dropped

    -
    operationType: "dropDatabase"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    ns: {
        db: string;
    }

    The database dropped

    +
    operationType: "dropDatabase"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamDropDocument.html b/docs/Next/interfaces/ChangeStreamDropDocument.html index 2266ae328b9..4fdd7f1056c 100644 --- a/docs/Next/interfaces/ChangeStreamDropDocument.html +++ b/docs/Next/interfaces/ChangeStreamDropDocument.html @@ -1,5 +1,5 @@ ChangeStreamDropDocument | mongodb

    Interface ChangeStreamDropDocument

    interface ChangeStreamDropDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        ns: ChangeStreamNameSpace;
        operationType: "drop";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -

    Namespace the drop event occurred on

    -
    operationType: "drop"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    Namespace the drop event occurred on

    +
    operationType: "drop"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamDropIndexDocument.html b/docs/Next/interfaces/ChangeStreamDropIndexDocument.html index fa3d45c4ba8..dde5fb0b59f 100644 --- a/docs/Next/interfaces/ChangeStreamDropIndexDocument.html +++ b/docs/Next/interfaces/ChangeStreamDropIndexDocument.html @@ -1,6 +1,6 @@ ChangeStreamDropIndexDocument | mongodb

    Interface ChangeStreamDropIndexDocument

    Only present when the showExpandedEvents flag is enabled.

    interface ChangeStreamDropIndexDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        operationDescription?: Document;
        operationType: "dropIndexes";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

operationDescription?: Document

A description of the operation.

    Only present when the showExpandedEvents flag is enabled.

    6.1.0

    -
    operationType: "dropIndexes"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    operationType: "dropIndexes"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamInsertDocument.html b/docs/Next/interfaces/ChangeStreamInsertDocument.html index c9817be7a7c..43f05312a19 100644 --- a/docs/Next/interfaces/ChangeStreamInsertDocument.html +++ b/docs/Next/interfaces/ChangeStreamInsertDocument.html @@ -1,5 +1,5 @@ ChangeStreamInsertDocument | mongodb

    Interface ChangeStreamInsertDocument<TSchema>

    interface ChangeStreamInsertDocument<TSchema> {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        documentKey: {
            _id: InferIdType<TSchema>;
            [shardKey: string]: any;
        };
        fullDocument: TSchema;
        lsid?: ServerSessionId;
        ns: ChangeStreamNameSpace;
        operationType: "insert";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Type Parameters

    Hierarchy (view full)

Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -
    documentKey: {
        _id: InferIdType<TSchema>;
        [shardKey: string]: any;
    }

    For unsharded collections this contains a single field _id. +

    documentKey: {
        _id: InferIdType<TSchema>;
        [shardKey: string]: any;
    }

    For unsharded collections this contains a single field _id. For sharded collections, this will contain all the components of the shard key

    -
    fullDocument: TSchema

    This key will contain the document being inserted

    -

    The identifier for the session associated with the transaction. +

    fullDocument: TSchema

    This key will contain the document being inserted

    +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -

    Namespace the insert event occurred on

    -
    operationType: "insert"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    Namespace the insert event occurred on

    +
    operationType: "insert"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamInvalidateDocument.html b/docs/Next/interfaces/ChangeStreamInvalidateDocument.html index 861d3a522fa..c957a44fd47 100644 --- a/docs/Next/interfaces/ChangeStreamInvalidateDocument.html +++ b/docs/Next/interfaces/ChangeStreamInvalidateDocument.html @@ -1,5 +1,5 @@ ChangeStreamInvalidateDocument | mongodb

    Interface ChangeStreamInvalidateDocument

    interface ChangeStreamInvalidateDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        lsid?: ServerSessionId;
        operationType: "invalidate";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -
    operationType: "invalidate"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    operationType: "invalidate"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamNameSpace.html b/docs/Next/interfaces/ChangeStreamNameSpace.html index 12b6d7fd0be..4194ae39b43 100644 --- a/docs/Next/interfaces/ChangeStreamNameSpace.html +++ b/docs/Next/interfaces/ChangeStreamNameSpace.html @@ -1,3 +1,3 @@ -ChangeStreamNameSpace | mongodb

    Interface ChangeStreamNameSpace

interface ChangeStreamNameSpace {
    coll: string;
    db: string;
}

Properties

coll: string
db: string
    diff --git a/docs/Next/interfaces/ChangeStreamOptions.html b/docs/Next/interfaces/ChangeStreamOptions.html index e13f660bc4f..bcc65b5a616 100644 --- a/docs/Next/interfaces/ChangeStreamOptions.html +++ b/docs/Next/interfaces/ChangeStreamOptions.html @@ -1,5 +1,5 @@ ChangeStreamOptions | mongodb

    Interface ChangeStreamOptions

    Options that can be passed to a ChangeStream. Note that startAfter, resumeAfter, and startAtOperationTime are all mutually exclusive, and the server will error if more than one is specified.
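A minimal sketch of passing these options to Collection.watch, using at most one of the mutually exclusive resume options (the saved token variable is a placeholder captured from an earlier event's _id):

// Resume from a previously saved token; do not also pass startAfter or startAtOperationTime.
const changeStream = collection.watch([], {
    fullDocument: 'updateLookup',
    resumeAfter: savedResumeToken,
});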


    interface ChangeStreamOptions {
        allowDiskUse?: boolean;
        authdb?: string;
        batchSize?: number;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        cursor?: Document;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        fullDocument?: string;
        fullDocumentBeforeChange?: string;
        hint?: Hint;
        ignoreUndefined?: boolean;
        let?: Document;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        out?: string;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        resumeAfter?: unknown;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        showExpandedEvents?: boolean;
        startAfter?: unknown;
        startAtOperationTime?: Timestamp;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
    }

    Hierarchy

    Properties

    allowDiskUse?: boolean

allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires MongoDB 2.6 or higher).

    -
    authdb?: string
    batchSize?: number

    The number of documents to return per batch.

    +
    authdb?: string
    batchSize?: number

    The number of documents to return per batch.

    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    bypassDocumentValidation?: boolean

    Allow driver to bypass schema validation.

    -
    checkKeys?: boolean

    the serializer will check if keys are valid.

    +
    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Specify collation.

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    cursor?: Document

Return the query as a cursor; on MongoDB 2.6 or higher this returns a real server-side cursor, while on earlier versions it returns an emulated cursor.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    cursor?: Document

Return the query as a cursor; on MongoDB 2.6 or higher this returns a real server-side cursor, while on earlier versions it returns an emulated cursor.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    This API is deprecated in favor of collection.aggregate().explain() or db.aggregate().explain().

    -
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    +
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    null

    fullDocument?: string

    Allowed values: 'updateLookup', 'whenAvailable', 'required'.

When set to 'updateLookup', the change notification for partial updates will include both a delta describing the changes to the document as well as a copy of the entire document that was changed from some time after the change occurred.

When set to 'whenAvailable', configures the change stream to return the post-image of the modified document for replace and update change events if the post-image for this event is available.

    When set to 'required', the same behavior as 'whenAvailable' except that an error is raised if the post-image is not available.

    -
    fullDocumentBeforeChange?: string

    Allowed values: 'whenAvailable', 'required', 'off'.

    +
    fullDocumentBeforeChange?: string

    Allowed values: 'whenAvailable', 'required', 'off'.

    The default is to not send a value, which is equivalent to 'off'.

    When set to 'whenAvailable', configures the change stream to return the pre-image of the modified document for replace, update, and delete change events if it is available.

    When set to 'required', the same behavior as 'whenAvailable' except that an error is raised if the pre-image is not available.

    -
    hint?: Hint

    Add an index selection hint to an aggregation command

    -
    ignoreUndefined?: boolean

    serialize will not emit undefined fields +

    hint?: Hint

    Add an index selection hint to an aggregation command

    +
    ignoreUndefined?: boolean

serialize will not emit undefined fields; note that the driver sets this to false

    true

    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    -
    maxAwaitTimeMS?: number

    The maximum amount of time for the server to wait on new documents to satisfy a change stream query.

    -
    maxTimeMS?: number

    specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.

    -
    noResponse?: boolean
    omitReadPreference?: boolean
    out?: string
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxAwaitTimeMS?: number

    The maximum amount of time for the server to wait on new documents to satisfy a change stream query.

    +
    maxTimeMS?: number

    Specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    out?: string
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -99,14 +100,14 @@

Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does work correctly at the Db, Collection, and per-operation level, the same as other BSON options.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    resumeAfter?: unknown

    Allows you to start a changeStream after a specified event.

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    resumeAfter?: unknown

    Allows you to start a changeStream after a specified event.

    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    showExpandedEvents?: boolean

    When enabled, configures the change stream to include extra change events.

    +
    showExpandedEvents?: boolean

    When enabled, configures the change stream to include extra change events.

    -
    startAfter?: unknown

    Similar to resumeAfter, but will allow you to start after an invalidated event.

    +
    startAfter?: unknown

    Similar to resumeAfter, but will allow you to start after an invalidated event.

    startAtOperationTime?: Timestamp

    Will start the changeStream after the specified operationTime.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    startAtOperationTime?: Timestamp

    Will start the changeStream after the specified operationTime.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean
    +
    willRetryWrite?: boolean
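A minimal usage sketch for several of the options above (the connection string, database, and collection names are placeholders, not part of the documented interface):

import { MongoClient } from 'mongodb';

const client = new MongoClient('<uri>');
const collection = client.db('test').collection('orders');

// Several ChangeStreamOptions passed to Collection.watch()
const changeStream = collection.watch([], {
    fullDocument: 'updateLookup',              // look up the post-image for update events
    fullDocumentBeforeChange: 'whenAvailable', // include the pre-image when the server has one
    maxAwaitTimeMS: 5_000,                     // how long the server waits for new events per batch
    batchSize: 100
});

for await (const change of changeStream) {
    console.log(change.operationType, change._id);
}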
    diff --git a/docs/Next/interfaces/ChangeStreamRefineCollectionShardKeyDocument.html b/docs/Next/interfaces/ChangeStreamRefineCollectionShardKeyDocument.html index befd4f2ec16..1b25e24a2a2 100644 --- a/docs/Next/interfaces/ChangeStreamRefineCollectionShardKeyDocument.html +++ b/docs/Next/interfaces/ChangeStreamRefineCollectionShardKeyDocument.html @@ -1,5 +1,5 @@ ChangeStreamRefineCollectionShardKeyDocument | mongodb

    Interface ChangeStreamRefineCollectionShardKeyDocument

    interface ChangeStreamRefineCollectionShardKeyDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        operationDescription?: Document;
        operationType: "refineCollectionShardKey";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

    Properties

    _id +
    interface ChangeStreamRefineCollectionShardKeyDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        operationDescription?: Document;
        operationType: "refineCollectionShardKey";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

    Properties

    Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -
    operationDescription?: Document

A description of the operation.

    +
    operationDescription?: Document

A description of the operation.

    Only present when the showExpandedEvents flag is enabled.

    6.1.0

    -
    operationType: "refineCollectionShardKey"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    operationType: "refineCollectionShardKey"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamRenameDocument.html b/docs/Next/interfaces/ChangeStreamRenameDocument.html index fce74dc0a08..f4dac5c21c7 100644 --- a/docs/Next/interfaces/ChangeStreamRenameDocument.html +++ b/docs/Next/interfaces/ChangeStreamRenameDocument.html @@ -1,5 +1,5 @@ ChangeStreamRenameDocument | mongodb

    Interface ChangeStreamRenameDocument

    interface ChangeStreamRenameDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        ns: ChangeStreamNameSpace;
        operationType: "rename";
        splitEvent?: ChangeStreamSplitEvent;
        to: {
            coll: string;
            db: string;
        };
        txnNumber?: number;
    }

    Hierarchy (view full)

    Properties

    _id +
    interface ChangeStreamRenameDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        ns: ChangeStreamNameSpace;
        operationType: "rename";
        splitEvent?: ChangeStreamSplitEvent;
        to: {
            coll: string;
            db: string;
        };
        txnNumber?: number;
    }

    Hierarchy (view full)

    Properties

    Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -

    The "from" namespace that the rename occurred on

    -
    operationType: "rename"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    The "from" namespace that the rename occurred on

    +
    operationType: "rename"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    to: {
        coll: string;
        db: string;
    }

    The new name for the ns.coll collection

    -
    txnNumber?: number

    The transaction number. +

    to: {
        coll: string;
        db: string;
    }

    The new name for the ns.coll collection

    +
    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
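A minimal sketch of consuming a rename event, assuming a connected MongoClient named client and an example database name: the ns field carries the "from" namespace and to carries the new name.

const changeStream = client.db('test').watch();

for await (const change of changeStream) {
    if (change.operationType === 'rename') {
        // `ns` is the namespace the rename occurred on; `to` is the new { db, coll } name
        console.log(`${change.ns.db}.${change.ns.coll} -> ${change.to.db}.${change.to.coll}`);
    }
}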
    diff --git a/docs/Next/interfaces/ChangeStreamReplaceDocument.html b/docs/Next/interfaces/ChangeStreamReplaceDocument.html index f1df54ba0de..de7126d51ff 100644 --- a/docs/Next/interfaces/ChangeStreamReplaceDocument.html +++ b/docs/Next/interfaces/ChangeStreamReplaceDocument.html @@ -1,5 +1,5 @@ ChangeStreamReplaceDocument | mongodb

    Interface ChangeStreamReplaceDocument<TSchema>

    interface ChangeStreamReplaceDocument<TSchema> {
        _id: unknown;
        clusterTime?: Timestamp;
        documentKey: {
            _id: InferIdType<TSchema>;
            [shardKey: string]: any;
        };
        fullDocument: TSchema;
        fullDocumentBeforeChange?: TSchema;
        lsid?: ServerSessionId;
        ns: ChangeStreamNameSpace;
        operationType: "replace";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Type Parameters

    Hierarchy (view full)

    Properties

    _id +
    interface ChangeStreamReplaceDocument<TSchema> {
        _id: unknown;
        clusterTime?: Timestamp;
        documentKey: {
            _id: InferIdType<TSchema>;
            [shardKey: string]: any;
        };
        fullDocument: TSchema;
        fullDocumentBeforeChange?: TSchema;
        lsid?: ServerSessionId;
        ns: ChangeStreamNameSpace;
        operationType: "replace";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Type Parameters

    Hierarchy (view full)

    Properties

    Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    documentKey: {
        _id: InferIdType<TSchema>;
        [shardKey: string]: any;
    }

    For unsharded collections this contains a single field _id. +

    documentKey: {
        _id: InferIdType<TSchema>;
        [shardKey: string]: any;
    }

    For unsharded collections this contains a single field _id. For sharded collections, this will contain all the components of the shard key

    -
    fullDocument: TSchema

    The fullDocument of a replace event represents the document after the insert of the replacement document

    -
    fullDocumentBeforeChange?: TSchema

    Contains the pre-image of the modified or deleted document if the +

    fullDocument: TSchema

    The fullDocument of a replace event represents the document after the insert of the replacement document

    +
    fullDocumentBeforeChange?: TSchema

    Contains the pre-image of the modified or deleted document if the pre-image is available for the change event and either 'required' or 'whenAvailable' was specified for the 'fullDocumentBeforeChange' option when creating the change stream. If 'whenAvailable' was specified but the pre-image is unavailable, this will be explicitly set to null.

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -

    Namespace the replace event occurred on

    -
    operationType: "replace"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    Namespace the replace event occurred on

    +
    operationType: "replace"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamReshardCollectionDocument.html b/docs/Next/interfaces/ChangeStreamReshardCollectionDocument.html index 663baebb853..2fb66d3550b 100644 --- a/docs/Next/interfaces/ChangeStreamReshardCollectionDocument.html +++ b/docs/Next/interfaces/ChangeStreamReshardCollectionDocument.html @@ -1,5 +1,5 @@ ChangeStreamReshardCollectionDocument | mongodb

    Interface ChangeStreamReshardCollectionDocument

    interface ChangeStreamReshardCollectionDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        operationDescription?: Document;
        operationType: "reshardCollection";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

    Properties

    _id +
    interface ChangeStreamReshardCollectionDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        operationDescription?: Document;
        operationType: "reshardCollection";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

    Properties

    Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -
    operationDescription?: Document

A description of the operation.

    +
    operationDescription?: Document

A description of the operation.

    Only present when the showExpandedEvents flag is enabled.

    6.1.0

    -
    operationType: "reshardCollection"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    operationType: "reshardCollection"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamShardCollectionDocument.html b/docs/Next/interfaces/ChangeStreamShardCollectionDocument.html index 46768903b06..a635b56c078 100644 --- a/docs/Next/interfaces/ChangeStreamShardCollectionDocument.html +++ b/docs/Next/interfaces/ChangeStreamShardCollectionDocument.html @@ -1,5 +1,5 @@ ChangeStreamShardCollectionDocument | mongodb

    Interface ChangeStreamShardCollectionDocument

    interface ChangeStreamShardCollectionDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        operationDescription?: Document;
        operationType: "shardCollection";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

    Properties

    _id +
    interface ChangeStreamShardCollectionDocument {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        lsid?: ServerSessionId;
        operationDescription?: Document;
        operationType: "shardCollection";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
    }

    Hierarchy (view full)

    Properties

    Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -
    operationDescription?: Document

A description of the operation.

    +
    operationDescription?: Document

A description of the operation.

    Only present when the showExpandedEvents flag is enabled.

    6.1.0

    -
    operationType: "shardCollection"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    operationType: "shardCollection"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    +
    diff --git a/docs/Next/interfaces/ChangeStreamSplitEvent.html b/docs/Next/interfaces/ChangeStreamSplitEvent.html index 5d5109707dc..0a05c6725a8 100644 --- a/docs/Next/interfaces/ChangeStreamSplitEvent.html +++ b/docs/Next/interfaces/ChangeStreamSplitEvent.html @@ -1,5 +1,5 @@ -ChangeStreamSplitEvent | mongodb

    Interface ChangeStreamSplitEvent

    interface ChangeStreamSplitEvent {
        fragment: number;
        of: number;
    }

    Properties

    fragment +ChangeStreamSplitEvent | mongodb

    Interface ChangeStreamSplitEvent

    interface ChangeStreamSplitEvent {
        fragment: number;
        of: number;
    }

    Properties

    Properties

    fragment: number

    Which fragment of the change this is.

    -
    of: number

    The total number of fragments.

    -
    +
    of: number

    The total number of fragments.

    +
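A minimal sketch of requesting split events, assuming a collection handle as in the earlier sketch: adding the $changeStreamSplitLargeEvent stage causes oversized events to arrive as ordered fragments, which can be detected through splitEvent.

const changeStream = collection.watch([
    { $changeStreamSplitLargeEvent: {} }
]);

for await (const change of changeStream) {
    if (change.splitEvent) {
        // Fragments of one logical event arrive in order: 1, 2, ..., `of`
        console.log(`fragment ${change.splitEvent.fragment} of ${change.splitEvent.of}`);
    }
}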
    diff --git a/docs/Next/interfaces/ChangeStreamUpdateDocument.html b/docs/Next/interfaces/ChangeStreamUpdateDocument.html index b20b7cf2d79..2f2e9b71170 100644 --- a/docs/Next/interfaces/ChangeStreamUpdateDocument.html +++ b/docs/Next/interfaces/ChangeStreamUpdateDocument.html @@ -1,5 +1,5 @@ ChangeStreamUpdateDocument | mongodb

    Interface ChangeStreamUpdateDocument<TSchema>

    interface ChangeStreamUpdateDocument<TSchema> {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        documentKey: {
            _id: InferIdType<TSchema>;
            [shardKey: string]: any;
        };
        fullDocument?: TSchema;
        fullDocumentBeforeChange?: TSchema;
        lsid?: ServerSessionId;
        ns: ChangeStreamNameSpace;
        operationType: "update";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
        updateDescription: UpdateDescription<TSchema>;
    }

    Type Parameters

    Hierarchy (view full)

    Properties

    _id +
    interface ChangeStreamUpdateDocument<TSchema> {
        _id: unknown;
        clusterTime?: Timestamp;
        collectionUUID: Binary;
        documentKey: {
            _id: InferIdType<TSchema>;
            [shardKey: string]: any;
        };
        fullDocument?: TSchema;
        fullDocumentBeforeChange?: TSchema;
        lsid?: ServerSessionId;
        ns: ChangeStreamNameSpace;
        operationType: "update";
        splitEvent?: ChangeStreamSplitEvent;
        txnNumber?: number;
        updateDescription: UpdateDescription<TSchema>;
    }

    Type Parameters

    Hierarchy (view full)

    Properties

    _id: unknown

    The id functions as an opaque token for use when resuming an interrupted change stream.

    -
    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. +

    clusterTime?: Timestamp

    The timestamp from the oplog entry associated with the event. For events that happened as part of a multi-document transaction, the associated change stream notifications will have the same clusterTime value, namely the time when the transaction was committed. On a sharded cluster, events that occur on different shards can have the same clusterTime but be associated with different transactions or even not be associated with any transaction. To identify events for a single transaction, you can use the combination of lsid and txnNumber in the change stream event document.

    -
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    +
    collectionUUID: Binary

    The UUID (Binary subtype 4) of the collection that the operation was performed on.

    Only present when the showExpandedEvents flag is enabled.

    NOTE: collectionUUID will be converted to a NodeJS Buffer if the promoteBuffers flag is enabled.

    6.1.0

    -
    documentKey: {
        _id: InferIdType<TSchema>;
        [shardKey: string]: any;
    }

    For unsharded collections this contains a single field _id. +

    documentKey: {
        _id: InferIdType<TSchema>;
        [shardKey: string]: any;
    }

    For unsharded collections this contains a single field _id. For sharded collections, this will contain all the components of the shard key

    -
    fullDocument?: TSchema

    This is only set if fullDocument is set to 'updateLookup' +

    fullDocument?: TSchema

    This is only set if fullDocument is set to 'updateLookup' Contains the point-in-time post-image of the modified document if the post-image is available and either 'required' or 'whenAvailable' was specified for the 'fullDocument' option when creating the change stream.

    -
    fullDocumentBeforeChange?: TSchema

    Contains the pre-image of the modified or deleted document if the +

    fullDocumentBeforeChange?: TSchema

    Contains the pre-image of the modified or deleted document if the pre-image is available for the change event and either 'required' or 'whenAvailable' was specified for the 'fullDocumentBeforeChange' option when creating the change stream. If 'whenAvailable' was specified but the pre-image is unavailable, this will be explicitly set to null.

    -

    The identifier for the session associated with the transaction. +

    The identifier for the session associated with the transaction. Only present if the operation is part of a multi-document transaction.

    -

    Namespace the update event occurred on

    -
    operationType: "update"

    Describes the type of operation represented in this change notification

    -

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent +

    Namespace the update event occurred on

    +
    operationType: "update"

    Describes the type of operation represented in this change notification

    +

    When the change stream's backing aggregation pipeline contains the $changeStreamSplitLargeEvent stage, events larger than 16MB will be split into multiple events and contain the following information about which fragment the current event is.

    -
    txnNumber?: number

    The transaction number. +

    txnNumber?: number

    The transaction number. Only present if the operation is part of a multi-document transaction.

    NOTE: txnNumber can be a Long if promoteLongs is set to false

    -
    updateDescription: UpdateDescription<TSchema>

    Contains a description of updated and removed fields in this operation

    -
    +
    updateDescription: UpdateDescription<TSchema>

    Contains a description of updated and removed fields in this operation

    +
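A minimal sketch of reading an update event, assuming a collection handle as in the earlier sketch: updateDescription lists the changed fields, and fullDocument is populated here because 'updateLookup' is requested.

const changeStream = collection.watch([], { fullDocument: 'updateLookup' });

for await (const change of changeStream) {
    if (change.operationType === 'update') {
        console.log('updated fields:', change.updateDescription.updatedFields);
        console.log('removed fields:', change.updateDescription.removedFields);
        // May still be null if the document was deleted before the lookup ran
        console.log('post-image:', change.fullDocument);
    }
}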
    diff --git a/docs/Next/interfaces/ClientBulkWriteOptions.html b/docs/Next/interfaces/ClientBulkWriteOptions.html index 7da272ce0ba..a6ca4d4ec97 100644 --- a/docs/Next/interfaces/ClientBulkWriteOptions.html +++ b/docs/Next/interfaces/ClientBulkWriteOptions.html @@ -1,4 +1,4 @@ -ClientBulkWriteOptions | mongodb

    Interface ClientBulkWriteOptions

    interface ClientBulkWriteOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        ordered?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        useBigInt64?: boolean;
        verboseResults?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +ClientBulkWriteOptions | mongodb

    Interface ClientBulkWriteOptions

    interface ClientBulkWriteOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        ordered?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        verboseResults?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    bypassDocumentValidation?: boolean

    Allow driver to bypass schema validation.

    false - documents will be validated by default

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    +
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    null

    ignoreUndefined?: boolean

serialize will not emit undefined fields; note that the driver sets this to false

    true

    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    -
    maxTimeMS?: number
    noResponse?: boolean
    omitReadPreference?: boolean
    ordered?: boolean

    If true, when an insert fails, don't execute the remaining writes. +

    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    ordered?: boolean

    If true, when an insert fails, don't execute the remaining writes. If false, continue with remaining inserts when one fails.

    true - inserts are ordered by default

    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    @@ -67,14 +69,15 @@

Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does work correctly at the Db, Collection, and per-operation level, the same as other BSON options.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    verboseResults?: boolean

    Whether detailed results for each successful operation should be included in the returned BulkWriteResult.

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
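A minimal sketch of passing these options to the client-level bulk write API, assuming a connected MongoClient named client; the namespaces, documents, and the exact write-model shape shown here are assumptions for illustration.

await client.bulkWrite(
    [
        { namespace: 'test.orders', name: 'insertOne', document: { sku: 'abc', qty: 1 } },
        { namespace: 'test.inventory', name: 'updateOne', filter: { sku: 'abc' }, update: { $inc: { qty: -1 } } }
    ],
    {
        ordered: false,        // keep going if one write fails
        verboseResults: true,  // include per-operation results in the returned result
        writeConcern: { w: 'majority' }
    }
);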
    diff --git a/docs/Next/interfaces/ClientEncryptionCreateDataKeyProviderOptions.html b/docs/Next/interfaces/ClientEncryptionCreateDataKeyProviderOptions.html index 9f7beb8f54f..7505365a26d 100644 --- a/docs/Next/interfaces/ClientEncryptionCreateDataKeyProviderOptions.html +++ b/docs/Next/interfaces/ClientEncryptionCreateDataKeyProviderOptions.html @@ -1,8 +1,8 @@ ClientEncryptionCreateDataKeyProviderOptions | mongodb

    Interface ClientEncryptionCreateDataKeyProviderOptions

    Options to provide when creating a new data key.

    -
    interface ClientEncryptionCreateDataKeyProviderOptions {
        keyAltNames?: string[];
        keyMaterial?: Buffer | Binary;
        masterKey?:
            | AWSEncryptionKeyOptions
            | AzureEncryptionKeyOptions
            | GCPEncryptionKeyOptions
            | KMIPEncryptionKeyOptions;
    }

    Properties

    interface ClientEncryptionCreateDataKeyProviderOptions {
        keyAltNames?: string[];
        keyMaterial?: Buffer | Binary;
        masterKey?:
            | AWSEncryptionKeyOptions
            | AzureEncryptionKeyOptions
            | GCPEncryptionKeyOptions
            | KMIPEncryptionKeyOptions;
    }

    Properties

    keyAltNames?: string[]

    An optional list of string alternate names used to reference a key. If a key is created with alternate names, then encryption may refer to the key by the unique alternate name instead of by _id.

    -
    keyMaterial?: Buffer | Binary

    Identifies a new KMS-specific key used to encrypt the new data key

    -
    +
    keyMaterial?: Buffer | Binary

    Identifies a new KMS-specific key used to encrypt the new data key

    +
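A minimal sketch of creating a data key with these options, assuming an existing ClientEncryption instance named clientEncryption; the KMS provider, region, key identifier, and alternate name are placeholders.

const keyId = await clientEncryption.createDataKey('aws', {
    masterKey: { region: '<region>', key: '<master key ARN>' },
    keyAltNames: ['app-data-key'] // lets later encrypt calls reference the key by name
});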
    diff --git a/docs/Next/interfaces/ClientEncryptionEncryptOptions.html b/docs/Next/interfaces/ClientEncryptionEncryptOptions.html index b6e665aad5d..3b7bf54ef3f 100644 --- a/docs/Next/interfaces/ClientEncryptionEncryptOptions.html +++ b/docs/Next/interfaces/ClientEncryptionEncryptOptions.html @@ -1,14 +1,14 @@ ClientEncryptionEncryptOptions | mongodb

    Interface ClientEncryptionEncryptOptions

    Options to provide when encrypting data.

    -
    interface ClientEncryptionEncryptOptions {
        algorithm:
            | "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"
            | "AEAD_AES_256_CBC_HMAC_SHA_512-Random"
            | "Indexed"
            | "Unindexed"
            | "Range";
        contentionFactor?: number | bigint;
        keyAltName?: string;
        keyId?: Binary;
        queryType?: "equality" | "range";
        rangeOptions?: RangeOptions;
    }

    Properties

    interface ClientEncryptionEncryptOptions {
        algorithm:
            | "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"
            | "AEAD_AES_256_CBC_HMAC_SHA_512-Random"
            | "Indexed"
            | "Unindexed"
            | "Range";
        contentionFactor?: number | bigint;
        keyAltName?: string;
        keyId?: Binary;
        queryType?: "equality" | "range";
        rangeOptions?: RangeOptions;
    }

    Properties

    algorithm:
        | "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic"
        | "AEAD_AES_256_CBC_HMAC_SHA_512-Random"
        | "Indexed"
        | "Unindexed"
        | "Range"

    The algorithm to use for encryption.

    -
    contentionFactor?: number | bigint

    The contention factor.

    -
    keyAltName?: string

    A unique string name corresponding to an already existing dataKey.

    -
    keyId?: Binary

    The id of the Binary dataKey to use for encryption

    -
    queryType?: "equality" | "range"

    The query type.

    -
    rangeOptions?: RangeOptions

    The index options for a Queryable Encryption field supporting "range" queries.

    -
    +
    contentionFactor?: number | bigint

    The contention factor.

    +
    keyAltName?: string

    A unique string name corresponding to an already existing dataKey.

    +
    keyId?: Binary

    The id of the Binary dataKey to use for encryption

    +
    queryType?: "equality" | "range"

    The query type.

    +
    rangeOptions?: RangeOptions

    The index options for a Queryable Encryption field supporting "range" queries.

    +
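A minimal sketch of encrypting a value with these options, assuming the clientEncryption instance and alternate key name from the previous sketch.

const ciphertext = await clientEncryption.encrypt(42, {
    keyAltName: 'app-data-key',
    algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic' // deterministic, so equality queries can match it
});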
    diff --git a/docs/Next/interfaces/ClientEncryptionOptions.html b/docs/Next/interfaces/ClientEncryptionOptions.html index 7374dff24e9..561531e9b17 100644 --- a/docs/Next/interfaces/ClientEncryptionOptions.html +++ b/docs/Next/interfaces/ClientEncryptionOptions.html @@ -1,12 +1,24 @@ ClientEncryptionOptions | mongodb

    Interface ClientEncryptionOptions

    Additional settings to provide when creating a new ClientEncryption instance.

    -
    interface ClientEncryptionOptions {
        keyVaultClient?: MongoClient;
        keyVaultNamespace: string;
        kmsProviders?: KMSProviders;
        proxyOptions?: ProxyOptions;
        tlsOptions?: CSFLEKMSTlsOptions;
    }

    Properties

    interface ClientEncryptionOptions {
        keyVaultClient?: MongoClient;
        keyVaultNamespace: string;
        kmsProviders?: KMSProviders;
        proxyOptions?: ProxyOptions;
        timeoutMS?: number;
        tlsOptions?: CSFLEKMSTlsOptions;
    }

    Properties

    keyVaultClient?: MongoClient

    A MongoClient used to fetch keys from a key vault. Defaults to client.

    -
    keyVaultNamespace: string

    The namespace of the key vault, used to store encryption keys

    -
    kmsProviders?: KMSProviders

    Options for specific KMS providers to use

    -
    proxyOptions?: ProxyOptions

    Options for specifying a Socks5 proxy to use for connecting to the KMS.

    -
    tlsOptions?: CSFLEKMSTlsOptions

    TLS options for kms providers to use.

    -
    +
    keyVaultNamespace: string

    The namespace of the key vault, used to store encryption keys

    +
    kmsProviders?: KMSProviders

    Options for specific KMS providers to use

    +
    proxyOptions?: ProxyOptions

    Options for specifying a Socks5 proxy to use for connecting to the KMS.

    +
    timeoutMS?: number

    The timeout setting to be used for all the operations on ClientEncryption.

    +

When provided, timeoutMS is used as the timeout for each operation executed on the ClientEncryption object. For example:

    +
    const clientEncryption = new ClientEncryption(client, {
timeoutMS: 1_000,
    kmsProviders: { local: { key: '<KEY>' } }
    });

    // `1_000` is used as the timeout for createDataKey call
await clientEncryption.createDataKey('local');

If timeoutMS is configured on the provided client, the client's timeoutMS value will be used unless timeoutMS is also provided as a client encryption option.

    +
    const client = new MongoClient('<uri>', { timeoutMS: 2_000 });

    // timeoutMS is set to 1_000 on clientEncryption
    const clientEncryption = new ClientEncryption(client, {
timeoutMS: 1_000,
    kmsProviders: { local: { key: '<KEY>' } }
});
    tlsOptions?: CSFLEKMSTlsOptions

    TLS options for kms providers to use.

    +
    diff --git a/docs/Next/interfaces/ClientEncryptionRewrapManyDataKeyProviderOptions.html b/docs/Next/interfaces/ClientEncryptionRewrapManyDataKeyProviderOptions.html index 3a7ec63ac94..4dd9fe16fe1 100644 --- a/docs/Next/interfaces/ClientEncryptionRewrapManyDataKeyProviderOptions.html +++ b/docs/Next/interfaces/ClientEncryptionRewrapManyDataKeyProviderOptions.html @@ -1,3 +1,3 @@ -ClientEncryptionRewrapManyDataKeyProviderOptions | mongodb

Interface ClientEncryptionRewrapManyDataKeyProviderOptions (Experimental)

    interface ClientEncryptionRewrapManyDataKeyProviderOptions {
        masterKey?:
            | AWSEncryptionKeyOptions
            | AzureEncryptionKeyOptions
            | GCPEncryptionKeyOptions
            | KMIPEncryptionKeyOptions;
        provider: keyof KMSProviders;
    }

    Properties

    masterKey? +ClientEncryptionRewrapManyDataKeyProviderOptions | mongodb

Interface ClientEncryptionRewrapManyDataKeyProviderOptions (Experimental)

    interface ClientEncryptionRewrapManyDataKeyProviderOptions {
        masterKey?:
            | AWSEncryptionKeyOptions
            | AzureEncryptionKeyOptions
            | GCPEncryptionKeyOptions
            | KMIPEncryptionKeyOptions;
        provider: keyof KMSProviders;
    }

    Properties

    Properties

    provider: keyof KMSProviders
    +

    Properties

    provider: keyof KMSProviders
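A minimal sketch of using these options with rewrapManyDataKey, assuming the clientEncryption instance from the earlier sketches; the filter, provider, and master key values are placeholders.

const result = await clientEncryption.rewrapManyDataKey(
    { keyAltNames: 'app-data-key' }, // filter over the key vault collection
    { provider: 'aws', masterKey: { region: '<region>', key: '<master key ARN>' } }
);
console.log(result.bulkWriteResult?.modifiedCount);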
    diff --git a/docs/Next/interfaces/ClientEncryptionRewrapManyDataKeyResult.html b/docs/Next/interfaces/ClientEncryptionRewrapManyDataKeyResult.html index 6f799a87331..14c4ed98f18 100644 --- a/docs/Next/interfaces/ClientEncryptionRewrapManyDataKeyResult.html +++ b/docs/Next/interfaces/ClientEncryptionRewrapManyDataKeyResult.html @@ -1,3 +1,3 @@ -ClientEncryptionRewrapManyDataKeyResult | mongodb

Interface ClientEncryptionRewrapManyDataKeyResult (Experimental)

    interface ClientEncryptionRewrapManyDataKeyResult {
        bulkWriteResult?: BulkWriteResult;
    }

    Properties

    bulkWriteResult? +ClientEncryptionRewrapManyDataKeyResult | mongodb

Interface ClientEncryptionRewrapManyDataKeyResult (Experimental)

    interface ClientEncryptionRewrapManyDataKeyResult {
        bulkWriteResult?: BulkWriteResult;
    }

    Properties

    Properties

    bulkWriteResult?: BulkWriteResult

    The result of rewrapping data keys. If unset, no keys matched the filter.

    -
    +
    diff --git a/docs/Next/interfaces/ClientSessionOptions.html b/docs/Next/interfaces/ClientSessionOptions.html index 82e6bbabc13..7081074d6a6 100644 --- a/docs/Next/interfaces/ClientSessionOptions.html +++ b/docs/Next/interfaces/ClientSessionOptions.html @@ -1,7 +1,10 @@ -ClientSessionOptions | mongodb

    Interface ClientSessionOptions

    interface ClientSessionOptions {
        causalConsistency?: boolean;
        defaultTransactionOptions?: TransactionOptions;
        snapshot?: boolean;
    }

    Properties

    causalConsistency? +ClientSessionOptions | mongodb

    Interface ClientSessionOptions

    interface ClientSessionOptions {
        causalConsistency?: boolean;
        defaultTimeoutMS?: number;
        defaultTransactionOptions?: TransactionOptions;
        snapshot?: boolean;
    }

    Properties

    causalConsistency?: boolean

    Whether causal consistency should be enabled on this session

    -
    defaultTransactionOptions?: TransactionOptions

    The default TransactionOptions to use for transactions started on this session.

    -
    snapshot?: boolean

    Whether all read operations should be read from the same snapshot for this session (NOTE: not compatible with causalConsistency=true)

    -
    +
    defaultTimeoutMS?: number

An overriding timeoutMS value to use for a client-side timeout. If not provided, the session uses the timeoutMS specified on the MongoClient.

    +
    defaultTransactionOptions?: TransactionOptions

    The default TransactionOptions to use for transactions started on this session.

    +
    snapshot?: boolean

    Whether all read operations should be read from the same snapshot for this session (NOTE: not compatible with causalConsistency=true)

    +
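A minimal sketch of starting a session with these options, assuming a connected MongoClient named client and example database and collection names.

const session = client.startSession({
    causalConsistency: true,
    defaultTimeoutMS: 10_000, // overrides the client-level timeoutMS for operations on this session
    defaultTransactionOptions: { writeConcern: { w: 'majority' } }
});

try {
    await client.db('test').collection('orders').insertOne({ sku: 'abc' }, { session });
} finally {
    await session.endSession();
}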
    diff --git a/docs/Next/interfaces/ClusteredCollectionOptions.html b/docs/Next/interfaces/ClusteredCollectionOptions.html index f11da616f0a..28ed506b02c 100644 --- a/docs/Next/interfaces/ClusteredCollectionOptions.html +++ b/docs/Next/interfaces/ClusteredCollectionOptions.html @@ -1,6 +1,6 @@ ClusteredCollectionOptions | mongodb

    Interface ClusteredCollectionOptions

    Configuration options for clustered collections

    interface ClusteredCollectionOptions {
        key: Document;
        name?: string;
        unique: boolean;
    }

    Hierarchy (view full)

    Properties

    key +
    interface ClusteredCollectionOptions {
        key: Document;
        name?: string;
        unique: boolean;
    }

    Hierarchy (view full)

    Properties

    Properties

    name?: string
    unique: boolean
    +

    Properties

    name?: string
    unique: boolean
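A minimal sketch of passing these options when creating a clustered collection, assuming a Db handle named db; the collection and index names are assumptions, and the server requires the key to be { _id: 1 } with unique set to true.

await db.createCollection('events', {
    clusteredIndex: { key: { _id: 1 }, unique: true, name: 'events clustered key' }
});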
    diff --git a/docs/Next/interfaces/CollationOptions.html b/docs/Next/interfaces/CollationOptions.html index 840eb6f2a59..164be557f78 100644 --- a/docs/Next/interfaces/CollationOptions.html +++ b/docs/Next/interfaces/CollationOptions.html @@ -1,4 +1,4 @@ -CollationOptions | mongodb

    Interface CollationOptions

    interface CollationOptions {
        alternate?: string;
        backwards?: boolean;
        caseFirst?: string;
        caseLevel?: boolean;
        locale: string;
        maxVariable?: string;
        normalization?: boolean;
        numericOrdering?: boolean;
        strength?: number;
    }

    Properties

    alternate? +CollationOptions | mongodb

    Interface CollationOptions

    interface CollationOptions {
        alternate?: string;
        backwards?: boolean;
        caseFirst?: string;
        caseLevel?: boolean;
        locale: string;
        maxVariable?: string;
        normalization?: boolean;
        numericOrdering?: boolean;
        strength?: number;
    }

    Properties

    alternate?: string
    backwards?: boolean
    caseFirst?: string
    caseLevel?: boolean
    locale: string
    maxVariable?: string
    normalization?: boolean
    numericOrdering?: boolean
    strength?: number
    +

    Properties

    alternate?: string
    backwards?: boolean
    caseFirst?: string
    caseLevel?: boolean
    locale: string
    maxVariable?: string
    normalization?: boolean
    numericOrdering?: boolean
    strength?: number
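A minimal sketch of applying a collation to a query, assuming a collection handle as in the earlier sketches; strength 2 makes the comparison case-insensitive.

const docs = await collection
    .find({ name: 'mongodb' })
    .collation({ locale: 'en', strength: 2 })
    .toArray();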
    diff --git a/docs/Next/interfaces/CollectionInfo.html b/docs/Next/interfaces/CollectionInfo.html index 53ac2f10501..c1ea6534533 100644 --- a/docs/Next/interfaces/CollectionInfo.html +++ b/docs/Next/interfaces/CollectionInfo.html @@ -1,6 +1,6 @@ -CollectionInfo | mongodb

    Interface CollectionInfo

    interface CollectionInfo {
        idIndex?: Document;
        info?: {
            readOnly?: false;
            uuid?: Binary;
        };
        name: string;
        options?: Document;
        type?: string;
    }

    Hierarchy (view full)

    Properties

    idIndex? +CollectionInfo | mongodb

    Interface CollectionInfo

    interface CollectionInfo {
        idIndex?: Document;
        info?: {
            readOnly?: false;
            uuid?: Binary;
        };
        name: string;
        options?: Document;
        type?: string;
    }

    Hierarchy (view full)

    Properties

    idIndex?: Document
    info?: {
        readOnly?: false;
        uuid?: Binary;
    }
    name: string
    options?: Document
    type?: string
    +

    Properties

    idIndex?: Document
    info?: {
        readOnly?: false;
        uuid?: Binary;
    }
    name: string
    options?: Document
    type?: string
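
A minimal sketch of where CollectionInfo documents come from: db.listCollections() yields them when a full listing (nameOnly: false) is requested. The URI and database name are placeholders.

import { MongoClient } from 'mongodb';

async function printCollectionInfo(): Promise<void> {
  const client = new MongoClient('mongodb://localhost:27017'); // placeholder URI
  try {
    const db = client.db('app');
    // Full listings (nameOnly: false) yield CollectionInfo documents
    const entries = await db.listCollections({}, { nameOnly: false }).toArray();
    for (const entry of entries) {
      console.log(entry.name, entry.type, entry.options);
    }
  } finally {
    await client.close();
  }
}
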
    diff --git a/docs/Next/interfaces/CollectionOptions.html b/docs/Next/interfaces/CollectionOptions.html index c92ce420277..661734d9eb5 100644 --- a/docs/Next/interfaces/CollectionOptions.html +++ b/docs/Next/interfaces/CollectionOptions.html @@ -1,4 +1,4 @@ -CollectionOptions | mongodb

    Interface CollectionOptions

    interface CollectionOptions {
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        enableUtf8Validation?: boolean;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        serializeFunctions?: boolean;
        useBigInt64?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    bsonRegExp? +CollectionOptions | mongodb

    Interface CollectionOptions

    interface CollectionOptions {
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        enableUtf8Validation?: boolean;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        serializeFunctions?: boolean;
        timeoutMS?: number;
        useBigInt64?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    @@ -43,7 +44,8 @@
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).

    serializeFunctions?: boolean

    serialize the javascript functions

    false

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    Write Concern as an object

    -
    +
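
A minimal sketch of supplying CollectionOptions when obtaining a collection handle; the URI and names are placeholders, and timeoutMS is included only because this change documents it on the interface.

import { MongoClient } from 'mongodb';

async function readFromSecondaries(): Promise<void> {
  const client = new MongoClient('mongodb://localhost:27017'); // placeholder URI
  try {
    const metrics = client.db('app').collection('metrics', {
      readPreference: 'secondaryPreferred',
      readConcern: { level: 'majority' },
      timeoutMS: 5_000 // per-operation timeout documented by this change
    });
    console.log(await metrics.findOne({ kind: 'daily' }));
  } finally {
    await client.close();
  }
}
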
    diff --git a/docs/Next/interfaces/CommandOperationOptions.html b/docs/Next/interfaces/CommandOperationOptions.html index 1bd33a48366..191dca26abb 100644 --- a/docs/Next/interfaces/CommandOperationOptions.html +++ b/docs/Next/interfaces/CommandOperationOptions.html @@ -1,4 +1,4 @@ -CommandOperationOptions | mongodb

    Interface CommandOperationOptions

    interface CommandOperationOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +CommandOperationOptions | mongodb

    Interface CommandOperationOptions

    interface CommandOperationOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    +
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    null

    ignoreUndefined?: boolean

The serializer will not emit undefined fields; note that the driver sets this to false.

    true

    -
    maxTimeMS?: number
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -57,12 +59,13 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
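
CommandOperationOptions is the base shape that most operation-specific options extend, so its fields are accepted by command-based helpers such as distinct(). A minimal sketch under that assumption; the URI, names, and values are placeholders, and timeoutMS is the field this change documents.

import { MongoClient } from 'mongodb';

async function distinctWithSharedOptions(): Promise<void> {
  const client = new MongoClient('mongodb://localhost:27017'); // placeholder URI
  try {
    const orders = client.db('app').collection('orders');
    const statuses = await orders.distinct('status', {}, {
      maxTimeMS: 2_000,                // server-side processing limit
      readConcern: { level: 'local' }, // per-operation read concern
      comment: 'nightly-report',       // visible in server logs/profiler
      timeoutMS: 5_000                 // client-side timeout documented by this change
    });
    console.log(statuses);
  } finally {
    await client.close();
  }
}
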
    diff --git a/docs/Next/interfaces/ConnectionOptions.html b/docs/Next/interfaces/ConnectionOptions.html index c8621e5c201..649099fc5b3 100644 --- a/docs/Next/interfaces/ConnectionOptions.html +++ b/docs/Next/interfaces/ConnectionOptions.html @@ -1,4 +1,4 @@ -ConnectionOptions | mongodb

    Interface ConnectionOptions

    interface ConnectionOptions {
        allowPartialTrustChain?: boolean;
        ALPNProtocols?: Uint8Array | string[] | Uint8Array[];
        autoSelectFamily?: boolean;
        autoSelectFamilyAttemptTimeout?: number;
        ca?: string | Buffer | (string | Buffer)[];
        cancellationToken?: CancellationToken;
        cert?: string | Buffer | (string | Buffer)[];
        checkServerIdentity?: ((hostname: string, cert: PeerCertificate) => Error | undefined);
        ciphers?: string;
        compressors?: (
            | "none"
            | "snappy"
            | "zlib"
            | "zstd")[];
        connectTimeoutMS?: number;
        credentials?: MongoCredentials;
        crl?: string | Buffer | (string | Buffer)[];
        ecdhCurve?: string;
        family?: number;
        generation: number;
        hints?: number;
        hostAddress: HostAddress;
        id: number | "<monitor>";
        key?: string | Buffer | (string | Buffer | KeyObject)[];
        loadBalanced: boolean;
        localAddress?: string;
        localPort?: number;
        logicalSessionTimeoutMinutes?: number;
        lookup?: LookupFunction;
        metadata: ClientMetadata;
        minDHSize?: number;
        monitorCommands: boolean;
        noDelay?: boolean;
        passphrase?: string;
        pfx?: string | Buffer | (string | Buffer | PxfObject)[];
        proxyHost?: string;
        proxyPassword?: string;
        proxyPort?: number;
        proxyUsername?: string;
        rejectUnauthorized?: boolean;
        secureContext?: SecureContext;
        secureProtocol?: string;
        serverApi?: ServerApi;
        servername?: string;
        session?: Buffer;
        socketTimeoutMS?: number;
        tls: boolean;
    }

    Hierarchy (view full)

    Properties

    allowPartialTrustChain? +ConnectionOptions | mongodb

    Interface ConnectionOptions

    interface ConnectionOptions {
        allowPartialTrustChain?: boolean;
        ALPNProtocols?: Uint8Array | string[] | Uint8Array[];
        autoSelectFamily?: boolean;
        autoSelectFamilyAttemptTimeout?: number;
        ca?: string | Buffer | (string | Buffer)[];
        cancellationToken?: CancellationToken;
        cert?: string | Buffer | (string | Buffer)[];
        checkServerIdentity?: ((hostname: string, cert: PeerCertificate) => Error | undefined);
        ciphers?: string;
        compressors?: (
            | "none"
            | "snappy"
            | "zlib"
            | "zstd")[];
        connectTimeoutMS?: number;
        credentials?: MongoCredentials;
        crl?: string | Buffer | (string | Buffer)[];
        ecdhCurve?: string;
        family?: number;
        generation: number;
        hints?: number;
        hostAddress: HostAddress;
        id: number | "<monitor>";
        key?: string | Buffer | (string | Buffer | KeyObject)[];
        loadBalanced: boolean;
        localAddress?: string;
        localPort?: number;
        logicalSessionTimeoutMinutes?: number;
        lookup?: LookupFunction;
        metadata: ClientMetadata;
        minDHSize?: number;
        monitorCommands: boolean;
        noDelay?: boolean;
        passphrase?: string;
        pfx?: string | Buffer | (string | Buffer | PxfObject)[];
        proxyHost?: string;
        proxyPassword?: string;
        proxyPort?: number;
        proxyUsername?: string;
        rejectUnauthorized?: boolean;
        secureContext?: SecureContext;
        secureProtocol?: string;
        serverApi?: ServerApi;
        servername?: string;
        session?: Buffer;
        socketTimeoutMS?: number;
        tls: boolean;
    }

    Hierarchy (view full)

    Properties

    ca?: string | Buffer | (string | Buffer)[]

    Optionally override the trusted CA certificates. Default is to trust the well-known CAs curated by Mozilla. Mozilla's CAs are completely replaced when CAs are explicitly specified using this option.

    -
    cancellationToken?: CancellationToken
    cert?: string | Buffer | (string | Buffer)[]

    Cert chains in PEM format. One cert chain should be provided per +

    cancellationToken?: CancellationToken
    cert?: string | Buffer | (string | Buffer)[]

    Cert chains in PEM format. One cert chain should be provided per private key. Each cert chain should consist of the PEM formatted certificate for a provided private key, followed by the PEM formatted intermediate certificates (if any), in order, and not @@ -77,7 +77,7 @@ information, see modifying the default cipher suite. Permitted ciphers can be obtained via tls.getCiphers(). Cipher names must be uppercased in order for OpenSSL to accept them.

    -
    compressors?: (
        | "none"
        | "snappy"
        | "zlib"
        | "zstd")[]
    connectTimeoutMS?: number
    credentials?: MongoCredentials
    crl?: string | Buffer | (string | Buffer)[]

    PEM formatted CRLs (Certificate Revocation Lists).

    +
    compressors?: (
        | "none"
        | "snappy"
        | "zlib"
        | "zstd")[]
    connectTimeoutMS?: number
    credentials?: MongoCredentials
    crl?: string | Buffer | (string | Buffer)[]

    PEM formatted CRLs (Certificate Revocation Lists).

    ecdhCurve?: string

    A string describing a named curve or a colon separated list of curve NIDs or names, for example P-521:P-384:P-256, to use for ECDH key agreement. Set to auto to select the curve automatically. Use @@ -85,7 +85,7 @@ recent releases, openssl ecparam -list_curves will also display the name and description of each available elliptic curve. Default: tls.DEFAULT_ECDH_CURVE.

    -
    family?: number
    generation: number
    hints?: number
    hostAddress: HostAddress
    id: number | "<monitor>"
    key?: string | Buffer | (string | Buffer | KeyObject)[]

    Private keys in PEM format. PEM allows the option of private keys +

    family?: number
    generation: number
    hints?: number
    hostAddress: HostAddress
    id: number | "<monitor>"
    key?: string | Buffer | (string | Buffer | KeyObject)[]

    Private keys in PEM format. PEM allows the option of private keys being encrypted. Encrypted keys will be decrypted with options.passphrase. Multiple keys using different algorithms can be provided either as an array of unencrypted key strings or buffers, @@ -93,7 +93,7 @@ passphrase: ]}. The object form can only occur in an array. object.passphrase is optional. Encrypted keys will be decrypted with object.passphrase if provided, or options.passphrase if it is not.

    -
    loadBalanced: boolean
    localAddress?: string
    localPort?: number
    logicalSessionTimeoutMinutes?: number
    lookup?: LookupFunction
    metadata: ClientMetadata
    minDHSize?: number
    monitorCommands: boolean
    noDelay?: boolean
    passphrase?: string

    Shared passphrase used for a single private key and/or a PFX.

    +
    loadBalanced: boolean
    localAddress?: string
    localPort?: number
    logicalSessionTimeoutMinutes?: number
    lookup?: LookupFunction
    metadata: ClientMetadata
    minDHSize?: number
    monitorCommands: boolean
    noDelay?: boolean
    passphrase?: string

    Shared passphrase used for a single private key and/or a PFX.

    pfx?: string | Buffer | (string | Buffer | PxfObject)[]

    PFX or PKCS12 encoded private key and certificate chain. pfx is an alternative to providing key and cert individually. PFX is usually encrypted, if it is, passphrase will be used to decrypt it. Multiple @@ -102,7 +102,7 @@ passphrase: ]}. The object form can only occur in an array. object.passphrase is optional. Encrypted PFX will be decrypted with object.passphrase if provided, or options.passphrase if it is not.

    -
    proxyHost?: string
    proxyPassword?: string
    proxyPort?: number
    proxyUsername?: string
    rejectUnauthorized?: boolean

    If true the server will reject any connection which is not +

    proxyHost?: string
    proxyPassword?: string
    proxyPort?: number
    proxyUsername?: string
    rejectUnauthorized?: boolean

    If true the server will reject any connection which is not authorized with the list of supplied CAs. This option only has an effect if requestCert is true.

    true
    @@ -118,5 +118,5 @@
     any TLS protocol version up to TLSv1.3. It is not recommended to use
     TLS versions less than 1.2, but it may be required for
     interoperability. Default: none, see minVersion.

    -
    serverApi?: ServerApi
    servername?: string
    session?: Buffer

    An optional Buffer instance containing a TLS session.

    -
    socketTimeoutMS?: number
    tls: boolean
    +
    serverApi?: ServerApi
    servername?: string
    session?: Buffer

    An optional Buffer instance containing a TLS session.

    +
    socketTimeoutMS?: number
    tls: boolean
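
ConnectionOptions instances are assembled internally by the driver for each connection; the TLS and timeout fields above mirror options that can be supplied on MongoClient. A minimal sketch under that assumption, with a placeholder host and certificate paths.

import { readFileSync } from 'node:fs';
import { MongoClient } from 'mongodb';

// ConnectionOptions itself is built internally per connection; these MongoClient
// options feed the TLS/timeout fields it documents.
const client = new MongoClient('mongodb://db.example.com:27017', {
  tls: true,
  ca: readFileSync('/etc/ssl/mongo-ca.pem'),    // override trusted CAs
  cert: readFileSync('/etc/ssl/client.pem'),    // client certificate chain
  key: readFileSync('/etc/ssl/client-key.pem'), // client private key
  connectTimeoutMS: 10_000,
  socketTimeoutMS: 60_000
});
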
    diff --git a/docs/Next/interfaces/ConnectionPoolOptions.html b/docs/Next/interfaces/ConnectionPoolOptions.html index 701a0cd6a45..109575a8a99 100644 --- a/docs/Next/interfaces/ConnectionPoolOptions.html +++ b/docs/Next/interfaces/ConnectionPoolOptions.html @@ -1,4 +1,4 @@ -ConnectionPoolOptions | mongodb

    Interface ConnectionPoolOptions

    interface ConnectionPoolOptions {
        allowPartialTrustChain?: boolean;
        ALPNProtocols?: Uint8Array | string[] | Uint8Array[];
        autoSelectFamily?: boolean;
        autoSelectFamilyAttemptTimeout?: number;
        ca?: string | Buffer | (string | Buffer)[];
        cancellationToken?: CancellationToken;
        cert?: string | Buffer | (string | Buffer)[];
        checkServerIdentity?: ((hostname: string, cert: PeerCertificate) => Error | undefined);
        ciphers?: string;
        compressors?: (
            | "none"
            | "snappy"
            | "zlib"
            | "zstd")[];
        connectTimeoutMS?: number;
        credentials?: MongoCredentials;
        crl?: string | Buffer | (string | Buffer)[];
        ecdhCurve?: string;
        family?: number;
        hints?: number;
        hostAddress: HostAddress;
        key?: string | Buffer | (string | Buffer | KeyObject)[];
        loadBalanced: boolean;
        localAddress?: string;
        localPort?: number;
        logicalSessionTimeoutMinutes?: number;
        lookup?: LookupFunction;
        maxConnecting: number;
        maxIdleTimeMS: number;
        maxPoolSize: number;
        metadata: ClientMetadata;
        minDHSize?: number;
        minPoolSize: number;
        monitorCommands: boolean;
        noDelay?: boolean;
        passphrase?: string;
        pfx?: string | Buffer | (string | Buffer | PxfObject)[];
        proxyHost?: string;
        proxyPassword?: string;
        proxyPort?: number;
        proxyUsername?: string;
        rejectUnauthorized?: boolean;
        secureContext?: SecureContext;
        secureProtocol?: string;
        serverApi?: ServerApi;
        servername?: string;
        session?: Buffer;
        socketTimeoutMS?: number;
        tls: boolean;
        waitQueueTimeoutMS: number;
    }

    Hierarchy

    Properties

    allowPartialTrustChain? +ConnectionPoolOptions | mongodb

    Interface ConnectionPoolOptions

    interface ConnectionPoolOptions {
        allowPartialTrustChain?: boolean;
        ALPNProtocols?: Uint8Array | string[] | Uint8Array[];
        autoSelectFamily?: boolean;
        autoSelectFamilyAttemptTimeout?: number;
        ca?: string | Buffer | (string | Buffer)[];
        cancellationToken?: CancellationToken;
        cert?: string | Buffer | (string | Buffer)[];
        checkServerIdentity?: ((hostname: string, cert: PeerCertificate) => Error | undefined);
        ciphers?: string;
        compressors?: (
            | "none"
            | "snappy"
            | "zlib"
            | "zstd")[];
        connectTimeoutMS?: number;
        credentials?: MongoCredentials;
        crl?: string | Buffer | (string | Buffer)[];
        ecdhCurve?: string;
        family?: number;
        hints?: number;
        hostAddress: HostAddress;
        key?: string | Buffer | (string | Buffer | KeyObject)[];
        loadBalanced: boolean;
        localAddress?: string;
        localPort?: number;
        logicalSessionTimeoutMinutes?: number;
        lookup?: LookupFunction;
        maxConnecting: number;
        maxIdleTimeMS: number;
        maxPoolSize: number;
        metadata: ClientMetadata;
        minDHSize?: number;
        minPoolSize: number;
        monitorCommands: boolean;
        noDelay?: boolean;
        passphrase?: string;
        pfx?: string | Buffer | (string | Buffer | PxfObject)[];
        proxyHost?: string;
        proxyPassword?: string;
        proxyPort?: number;
        proxyUsername?: string;
        rejectUnauthorized?: boolean;
        secureContext?: SecureContext;
        secureProtocol?: string;
        serverApi?: ServerApi;
        servername?: string;
        session?: Buffer;
        socketTimeoutMS?: number;
        tls: boolean;
        waitQueueTimeoutMS: number;
    }

    Hierarchy

    Properties

    ca?: string | Buffer | (string | Buffer)[]

    Optionally override the trusted CA certificates. Default is to trust the well-known CAs curated by Mozilla. Mozilla's CAs are completely replaced when CAs are explicitly specified using this option.

    -
    cancellationToken?: CancellationToken
    cert?: string | Buffer | (string | Buffer)[]

    Cert chains in PEM format. One cert chain should be provided per +

    cancellationToken?: CancellationToken
    cert?: string | Buffer | (string | Buffer)[]

    Cert chains in PEM format. One cert chain should be provided per private key. Each cert chain should consist of the PEM formatted certificate for a provided private key, followed by the PEM formatted intermediate certificates (if any), in order, and not @@ -80,7 +80,7 @@ information, see modifying the default cipher suite. Permitted ciphers can be obtained via tls.getCiphers(). Cipher names must be uppercased in order for OpenSSL to accept them.

    -
    compressors?: (
        | "none"
        | "snappy"
        | "zlib"
        | "zstd")[]
    connectTimeoutMS?: number
    credentials?: MongoCredentials
    crl?: string | Buffer | (string | Buffer)[]

    PEM formatted CRLs (Certificate Revocation Lists).

    +
    compressors?: (
        | "none"
        | "snappy"
        | "zlib"
        | "zstd")[]
    connectTimeoutMS?: number
    credentials?: MongoCredentials
    crl?: string | Buffer | (string | Buffer)[]

    PEM formatted CRLs (Certificate Revocation Lists).

    ecdhCurve?: string

    A string describing a named curve or a colon separated list of curve NIDs or names, for example P-521:P-384:P-256, to use for ECDH key agreement. Set to auto to select the curve automatically. Use @@ -88,7 +88,7 @@ recent releases, openssl ecparam -list_curves will also display the name and description of each available elliptic curve. Default: tls.DEFAULT_ECDH_CURVE.

    -
    family?: number
    hints?: number
    hostAddress: HostAddress
    key?: string | Buffer | (string | Buffer | KeyObject)[]

    Private keys in PEM format. PEM allows the option of private keys +

    family?: number
    hints?: number
    hostAddress: HostAddress
    key?: string | Buffer | (string | Buffer | KeyObject)[]

    Private keys in PEM format. PEM allows the option of private keys being encrypted. Encrypted keys will be decrypted with options.passphrase. Multiple keys using different algorithms can be provided either as an array of unencrypted key strings or buffers, @@ -97,11 +97,11 @@ object.passphrase is optional. Encrypted keys will be decrypted with object.passphrase if provided, or options.passphrase if it is not.

    loadBalanced: boolean

    If we are in load balancer mode.

    -
    localAddress?: string
    localPort?: number
    logicalSessionTimeoutMinutes?: number
    lookup?: LookupFunction
    maxConnecting: number

    The maximum number of connections that may be in the process of being established concurrently by the connection pool.

    -
    maxIdleTimeMS: number

    The maximum amount of time a connection should remain idle in the connection pool before being marked idle.

    -
    maxPoolSize: number

    The maximum number of connections that may be associated with a pool at a given time. This includes in use and available connections.

    -
    metadata: ClientMetadata
    minDHSize?: number
    minPoolSize: number

    The minimum number of connections that MUST exist at any moment in a single connection pool.

    -
    monitorCommands: boolean
    noDelay?: boolean
    passphrase?: string

    Shared passphrase used for a single private key and/or a PFX.

    +
    localAddress?: string
    localPort?: number
    logicalSessionTimeoutMinutes?: number
    lookup?: LookupFunction
    maxConnecting: number

    The maximum number of connections that may be in the process of being established concurrently by the connection pool.

    +
    maxIdleTimeMS: number

    The maximum amount of time a connection should remain idle in the connection pool before being marked idle.

    +
    maxPoolSize: number

    The maximum number of connections that may be associated with a pool at a given time. This includes in use and available connections.

    +
    metadata: ClientMetadata
    minDHSize?: number
    minPoolSize: number

    The minimum number of connections that MUST exist at any moment in a single connection pool.

    +
    monitorCommands: boolean
    noDelay?: boolean
    passphrase?: string

    Shared passphrase used for a single private key and/or a PFX.

    pfx?: string | Buffer | (string | Buffer | PxfObject)[]

    PFX or PKCS12 encoded private key and certificate chain. pfx is an alternative to providing key and cert individually. PFX is usually encrypted, if it is, passphrase will be used to decrypt it. Multiple @@ -110,7 +110,7 @@ passphrase: ]}. The object form can only occur in an array. object.passphrase is optional. Encrypted PFX will be decrypted with object.passphrase if provided, or options.passphrase if it is not.

    -
    proxyHost?: string
    proxyPassword?: string
    proxyPort?: number
    proxyUsername?: string
    rejectUnauthorized?: boolean

    If true the server will reject any connection which is not +

    proxyHost?: string
    proxyPassword?: string
    proxyPort?: number
    proxyUsername?: string
    rejectUnauthorized?: boolean

    If true the server will reject any connection which is not authorized with the list of supplied CAs. This option only has an effect if requestCert is true.

    true
    @@ -126,6 +126,6 @@
     any TLS protocol version up to TLSv1.3. It is not recommended to use
     TLS versions less than 1.2, but it may be required for
     interoperability. Default: none, see minVersion.

    -
    serverApi?: ServerApi
    servername?: string
    session?: Buffer

    An optional Buffer instance containing a TLS session.

    -
    socketTimeoutMS?: number
    tls: boolean
    waitQueueTimeoutMS: number

    The maximum amount of time operation execution should wait for a connection to become available. The default is 0 which means there is no limit.

    -
    +
    serverApi?: ServerApi
    servername?: string
    session?: Buffer

    An optional Buffer instance containing a TLS session.

    +
    socketTimeoutMS?: number
    tls: boolean
    waitQueueTimeoutMS: number

    The maximum amount of time operation execution should wait for a connection to become available. The default is 0 which means there is no limit.

    +
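
The pool-sizing fields above (maxPoolSize, minPoolSize, maxConnecting, maxIdleTimeMS, waitQueueTimeoutMS) are normally configured through MongoClient options or URI parameters rather than by constructing this interface directly. A minimal sketch with illustrative values.

import { MongoClient } from 'mongodb';

// Equivalent URI form: mongodb://localhost:27017/?maxPoolSize=20&minPoolSize=2&maxIdleTimeMS=60000
const client = new MongoClient('mongodb://localhost:27017', {
  maxPoolSize: 20,           // upper bound on connections per server
  minPoolSize: 2,            // connections the pool keeps warm
  maxConnecting: 2,          // cap on concurrent connection establishment
  maxIdleTimeMS: 60_000,     // idle connections are reclaimed after this
  waitQueueTimeoutMS: 5_000  // fail checkout if nothing frees up in time
});
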
    diff --git a/docs/Next/interfaces/CountDocumentsOptions.html b/docs/Next/interfaces/CountDocumentsOptions.html index 86551758866..c33ac25cc93 100644 --- a/docs/Next/interfaces/CountDocumentsOptions.html +++ b/docs/Next/interfaces/CountDocumentsOptions.html @@ -1,4 +1,4 @@ -CountDocumentsOptions | mongodb

    Interface CountDocumentsOptions

    interface CountDocumentsOptions {
        allowDiskUse?: boolean;
        authdb?: string;
        batchSize?: number;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        cursor?: Document;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Hint;
        ignoreUndefined?: boolean;
        let?: Document;
        limit?: number;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        out?: string;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        skip?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    allowDiskUse? +CountDocumentsOptions | mongodb

    Interface CountDocumentsOptions

    interface CountDocumentsOptions {
        allowDiskUse?: boolean;
        authdb?: string;
        batchSize?: number;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        cursor?: Document;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Hint;
        ignoreUndefined?: boolean;
        let?: Document;
        limit?: number;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        out?: string;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        skip?: number;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    allowDiskUse?: boolean

allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires MongoDB 2.6 or higher).

    -
    authdb?: string
    batchSize?: number

    The number of documents to return per batch. See aggregation documentation.

    -
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +
    authdb?: string
    batchSize?: number

    The number of documents to return per batch. See aggregation documentation.

    +
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    bypassDocumentValidation?: boolean

    Allow driver to bypass schema validation.

    -
    checkKeys?: boolean

    the serializer will check if keys are valid.

    +
    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Specify collation.

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    cursor?: Document

Return the query as a cursor; on MongoDB 2.6 and later a real cursor is returned, while earlier servers return an emulated cursor.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    cursor?: Document

Return the query as a cursor; on MongoDB 2.6 and later a real cursor is returned, while earlier servers return an emulated cursor.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    This API is deprecated in favor of collection.aggregate().explain() or db.aggregate().explain().

    -
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    +
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    null

    hint?: Hint

    Add an index selection hint to an aggregation command

    -
    ignoreUndefined?: boolean

    serialize will not emit undefined fields +

    ignoreUndefined?: boolean

The serializer will not emit undefined fields; note that the driver sets this to false.

    true

    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    -
    limit?: number

The maximum number of documents to consider.

    +
    limit?: number

The maximum number of documents to consider.

    maxAwaitTimeMS?: number

    The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query.

    -
    maxTimeMS?: number

    specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.

    -
    noResponse?: boolean
    omitReadPreference?: boolean
    out?: string
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxTimeMS?: number

    Specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    out?: string
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -78,13 +79,14 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    skip?: number

    The number of documents to skip.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    skip?: number

    The number of documents to skip.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
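
A minimal sketch of passing CountDocumentsOptions to collection.countDocuments(); because countDocuments is implemented as an aggregation, hint, collation, skip, and limit behave as aggregation options. The URI, names, and the { status: 1 } index referenced by hint are assumptions.

import { MongoClient } from 'mongodb';

async function countActiveUsers(): Promise<number> {
  const client = new MongoClient('mongodb://localhost:27017'); // placeholder URI
  try {
    const users = client.db('app').collection('users');
    return await users.countDocuments(
      { status: 'active' },
      {
        limit: 10_000,                            // stop counting past this many matches
        maxTimeMS: 2_000,                         // server-side time limit
        hint: { status: 1 },                      // assumes an index on { status: 1 }
        collation: { locale: 'en', strength: 2 }  // case-insensitive match
      }
    );
  } finally {
    await client.close();
  }
}
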
    diff --git a/docs/Next/interfaces/CountOptions.html b/docs/Next/interfaces/CountOptions.html index f12cd9d51ab..164b211781d 100644 --- a/docs/Next/interfaces/CountOptions.html +++ b/docs/Next/interfaces/CountOptions.html @@ -1,4 +1,4 @@ -CountOptions | mongodb

    Interface CountOptions

    interface CountOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: string | Document;
        ignoreUndefined?: boolean;
        limit?: number;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        skip?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +CountOptions | mongodb

    Interface CountOptions

    interface CountOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: string | Document;
        ignoreUndefined?: boolean;
        limit?: number;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        skip?: number;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    +
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    null

    hint?: string | Document

    An index name hint for the query.

    -
    ignoreUndefined?: boolean

    serialize will not emit undefined fields +

    ignoreUndefined?: boolean

The serializer will not emit undefined fields; note that the driver sets this to false.

    true

    limit?: number

The maximum number of documents to count before aborting.

    -
    maxTimeMS?: number

    Number of milliseconds to wait before aborting the query.

    -
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxTimeMS?: number

    Number of milliseconds to wait before aborting the query.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -63,13 +64,14 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    skip?: number

    The number of documents to skip.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    skip?: number

    The number of documents to skip.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
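
CountOptions belongs to the legacy collection.count() helper, which is deprecated in favour of countDocuments() and estimatedDocumentCount() but still accepts these fields. A minimal sketch; the URI and names are placeholders.

import { MongoClient } from 'mongodb';

async function legacyCount(): Promise<number> {
  const client = new MongoClient('mongodb://localhost:27017'); // placeholder URI
  try {
    const users = client.db('app').collection('users');
    // count() is deprecated; prefer countDocuments() or estimatedDocumentCount()
    return await users.count({ status: 'active' }, { limit: 1_000, maxTimeMS: 2_000 });
  } finally {
    await client.close();
  }
}
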
    diff --git a/docs/Next/interfaces/CreateCollectionOptions.html b/docs/Next/interfaces/CreateCollectionOptions.html index 61da1b43f01..c556114a600 100644 --- a/docs/Next/interfaces/CreateCollectionOptions.html +++ b/docs/Next/interfaces/CreateCollectionOptions.html @@ -1,4 +1,4 @@ -CreateCollectionOptions | mongodb

    Interface CreateCollectionOptions

    interface CreateCollectionOptions {
        authdb?: string;
        autoIndexId?: boolean;
        bsonRegExp?: boolean;
        capped?: boolean;
        changeStreamPreAndPostImages?: {
            enabled: boolean;
        };
        checkKeys?: boolean;
        clusteredIndex?: ClusteredCollectionOptions;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        encryptedFields?: Document;
        expireAfterSeconds?: number;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        flags?: number;
        ignoreUndefined?: boolean;
        indexOptionDefaults?: Document;
        max?: number;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        pipeline?: Document[];
        pkFactory?: PkFactory;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        size?: number;
        storageEngine?: Document;
        timeseries?: TimeSeriesCollectionOptions;
        useBigInt64?: boolean;
        validationAction?: string;
        validationLevel?: string;
        validator?: Document;
        viewOn?: string;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +CreateCollectionOptions | mongodb

    Interface CreateCollectionOptions

    interface CreateCollectionOptions {
        authdb?: string;
        autoIndexId?: boolean;
        bsonRegExp?: boolean;
        capped?: boolean;
        changeStreamPreAndPostImages?: {
            enabled: boolean;
        };
        checkKeys?: boolean;
        clusteredIndex?: ClusteredCollectionOptions;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        encryptedFields?: Document;
        expireAfterSeconds?: number;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        flags?: number;
        ignoreUndefined?: boolean;
        indexOptionDefaults?: Document;
        max?: number;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        pipeline?: Document[];
        pkFactory?: PkFactory;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        size?: number;
        storageEngine?: Document;
        timeoutMS?: number;
        timeseries?: TimeSeriesCollectionOptions;
        useBigInt64?: boolean;
        validationAction?: string;
        validationLevel?: string;
        validator?: Document;
        viewOn?: string;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    autoIndexId?: boolean

    Create an index on the _id field of the document. This option is deprecated in MongoDB 3.2+ and will be removed once no longer supported by the server.

    -
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    autoIndexId?: boolean

    Create an index on the _id field of the document. This option is deprecated in MongoDB 3.2+ and will be removed once no longer supported by the server.

    +
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    capped?: boolean

    Create a capped collection

    -
    changeStreamPreAndPostImages?: {
        enabled: boolean;
    }

    If set, enables pre-update and post-update document events to be included for any +

    changeStreamPreAndPostImages?: {
        enabled: boolean;
    }

    If set, enables pre-update and post-update document events to be included for any change streams that listen on this collection.

    -
    checkKeys?: boolean

    the serializer will check if keys are valid.

    +
    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    A document specifying configuration options for clustered collections. For MongoDB 5.3 and above.

    -
    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    collation?: CollationOptions

    Collation

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    -
    encryptedFields?: Document
    expireAfterSeconds?: number

    The number of seconds after which a document in a timeseries or clustered collection expires.

    -

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    encryptedFields?: Document
    expireAfterSeconds?: number

    The number of seconds after which a document in a timeseries or clustered collection expires.

    +

    Specifies the verbosity mode for the explain output.

    +
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    null

    flags?: number

    Available for the MMAPv1 storage engine only to set the usePowerOf2Sizes and the noPadding flag

    -
    ignoreUndefined?: boolean

    serialize will not emit undefined fields +

    ignoreUndefined?: boolean

The serializer will not emit undefined fields; note that the driver sets this to false.

    true

    indexOptionDefaults?: Document

    Allows users to specify a default configuration for indexes when creating a collection

    -
    max?: number

    The maximum number of documents in the capped collection

    -
    maxTimeMS?: number
    noResponse?: boolean
    omitReadPreference?: boolean
    pipeline?: Document[]

An array of aggregation pipeline stages. Creates the view by applying the specified pipeline to the viewOn collection or view

    -
    pkFactory?: PkFactory

    A primary key factory function for generation of custom _id keys.

    -
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    max?: number

    The maximum number of documents in the capped collection

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    pipeline?: Document[]

An array of aggregation pipeline stages. Creates the view by applying the specified pipeline to the viewOn collection or view

    +
    pkFactory?: PkFactory

    A primary key factory function for generation of custom _id keys.

    +
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -86,19 +88,20 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    size?: number

    The size of the capped collection in bytes

    -
    storageEngine?: Document

    Allows users to specify configuration to the storage engine on a per-collection basis when creating a collection

    -

    A document specifying configuration options for timeseries collections.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    size?: number

    The size of the capped collection in bytes

    +
    storageEngine?: Document

    Allows users to specify configuration to the storage engine on a per-collection basis when creating a collection

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +

    A document specifying configuration options for timeseries collections.

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    validationAction?: string

    Determines whether to error on invalid documents or just warn about the violations but allow invalid documents to be inserted

    -
    validationLevel?: string

    Determines how strictly MongoDB applies the validation rules to existing documents during an update

    -
    validator?: Document

    Allows users to specify validation rules or expressions for the collection. For more information, see Document Validation

    -
    viewOn?: string

    The name of the source collection or view from which to create the view. The name is not the full namespace of the collection or view (i.e., does not include the database name and implies the same database as the view to create)

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    validationLevel?: string

    Determines how strictly MongoDB applies the validation rules to existing documents during an update

    +
    validator?: Document

    Allows users to specify validation rules or expressions for the collection. For more information, see Document Validation

    +
    viewOn?: string

    The name of the source collection or view from which to create the view. The name is not the full namespace of the collection or view (i.e., does not include the database name and implies the same database as the view to create)

    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
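
A minimal sketch of a createCollection() call exercising a few of these options (capped sizing plus schema validation); the URI, names, and schema are placeholders.

import { MongoClient } from 'mongodb';

async function createAuditLog(): Promise<void> {
  const client = new MongoClient('mongodb://localhost:27017'); // placeholder URI
  try {
    await client.db('app').createCollection('audit_log', {
      capped: true,
      size: 10 * 1024 * 1024, // capped collections require a size in bytes
      max: 50_000,            // optional document-count cap
      validator: { $jsonSchema: { bsonType: 'object', required: ['action', 'at'] } },
      validationAction: 'error',
      validationLevel: 'strict'
    });
  } finally {
    await client.close();
  }
}
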
    diff --git a/docs/Next/interfaces/CreateIndexesOptions.html b/docs/Next/interfaces/CreateIndexesOptions.html index 9270a54eacc..cb5979b5727 100644 --- a/docs/Next/interfaces/CreateIndexesOptions.html +++ b/docs/Next/interfaces/CreateIndexesOptions.html @@ -1,4 +1,4 @@ -CreateIndexesOptions | mongodb

    Interface CreateIndexesOptions

    interface CreateIndexesOptions {
        2dsphereIndexVersion?: number;
        authdb?: string;
        background?: boolean;
        bits?: number;
        bsonRegExp?: boolean;
        bucketSize?: number;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        commitQuorum?: string | number;
        dbName?: string;
        default_language?: string;
        enableUtf8Validation?: boolean;
        expireAfterSeconds?: number;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hidden?: boolean;
        ignoreUndefined?: boolean;
        language_override?: string;
        max?: number;
        maxTimeMS?: number;
        min?: number;
        name?: string;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        partialFilterExpression?: Document;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        sparse?: boolean;
        storageEngine?: Document;
        textIndexVersion?: number;
        unique?: boolean;
        useBigInt64?: boolean;
        version?: number;
        weights?: Document;
        wildcardProjection?: Document;
        willRetryWrite?: boolean;
    }

    Hierarchy

    Properties

    2dsphereIndexVersion? +CreateIndexesOptions | mongodb

    Interface CreateIndexesOptions

    interface CreateIndexesOptions {
        2dsphereIndexVersion?: number;
        authdb?: string;
        background?: boolean;
        bits?: number;
        bsonRegExp?: boolean;
        bucketSize?: number;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        commitQuorum?: string | number;
        dbName?: string;
        default_language?: string;
        enableUtf8Validation?: boolean;
        expireAfterSeconds?: number;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hidden?: boolean;
        ignoreUndefined?: boolean;
        language_override?: string;
        max?: number;
        maxTimeMS?: number;
        min?: number;
        name?: string;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        partialFilterExpression?: Document;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        sparse?: boolean;
        storageEngine?: Document;
        textIndexVersion?: number;
        timeoutMS?: number;
        unique?: boolean;
        useBigInt64?: boolean;
        version?: number;
        weights?: Document;
        wildcardProjection?: Document;
        willRetryWrite?: boolean;
    }

    Hierarchy

    Properties

    2dsphereIndexVersion?: number
    authdb?: string
    background?: boolean

    Creates the index in the background, yielding whenever possible.

    -
    bits?: number
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    2dsphereIndexVersion?: number
    authdb?: string
    background?: boolean

    Creates the index in the background, yielding whenever possible.

    +
    bits?: number
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    -
    bucketSize?: number
    checkKeys?: boolean

    the serializer will check if keys are valid.

    +
    bucketSize?: number
    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    commitQuorum?: string | number

(MongoDB 4.4 or higher) Specifies how many data-bearing members of a replica set, including the primary, must complete the index builds successfully before the primary marks the indexes as ready. This option accepts the same values as the "w" field in a write concern, plus "votingMembers", which indicates all voting data-bearing nodes.

    -
    dbName?: string
    default_language?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    commitQuorum?: string | number

(MongoDB 4.4 or higher) Specifies how many data-bearing members of a replica set, including the primary, must complete the index builds successfully before the primary marks the indexes as ready. This option accepts the same values as the "w" field in a write concern, plus "votingMembers", which indicates all voting data-bearing nodes. See the index-creation example at the end of this listing.

    +
    dbName?: string
    default_language?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    expireAfterSeconds?: number

Allows you to expire data on indexes applied to a date field (MongoDB 2.2 or higher)

    -

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as raw, unserialized buffers.

    +

    Specifies the verbosity mode for the explain output.

    +
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as raw, unserialized buffers.

    null

    hidden?: boolean

    Specifies that the index should exist on the target collection but should not be used by the query planner when executing operations. (MongoDB 4.4 or higher)

    -
    ignoreUndefined?: boolean

Serialization will not emit undefined fields; note that the driver sets this to false.

+

    ignoreUndefined?: boolean

Serialization will not emit undefined fields; note that the driver sets this to false.

    true

    -
    language_override?: string
    max?: number

For geospatial indexes, set the upper bound for the coordinates.

    -
    maxTimeMS?: number
    min?: number

For geospatial indexes, set the lower bound for the coordinates.

    -
    name?: string

    Override the autogenerated index name (useful if the resulting name is larger than 128 bytes)

    -
    noResponse?: boolean
    omitReadPreference?: boolean
    partialFilterExpression?: Document

    Creates a partial index based on the given filter object (MongoDB 3.2 or higher)

    -
    promoteBuffers?: boolean

When deserializing a Binary, return it as a Node.js Buffer instance.

    +
    language_override?: string
    max?: number

For geospatial indexes, set the upper bound for the coordinates.

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    min?: number

For geospatial indexes, set the lower bound for the coordinates.

    +
    name?: string

    Override the autogenerated index name (useful if the resulting name is larger than 128 bytes)

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    partialFilterExpression?: Document

    Creates a partial index based on the given filter object (MongoDB 3.2 or higher)

    +
    promoteBuffers?: boolean

When deserializing a Binary, return it as a Node.js Buffer instance.

    false

    promoteLongs?: boolean

When deserializing a Long, fit it into a Number if it is smaller than 53 bits.

    true

    @@ -84,15 +86,16 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

Serialize JavaScript functions.

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

Serialize JavaScript functions.

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    sparse?: boolean

    Creates a sparse index.

    -
    storageEngine?: Document

    Allows users to configure the storage engine on a per-index basis when creating an index. (MongoDB 3.0 or higher)

    -
    textIndexVersion?: number
    unique?: boolean

Creates a unique index.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    sparse?: boolean

    Creates a sparse index.

    +
    storageEngine?: Document

    Allows users to configure the storage engine on a per-index basis when creating an index. (MongoDB 3.0 or higher)

    +
    textIndexVersion?: number
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    unique?: boolean

Creates a unique index.

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    version?: number

    Specifies the index version number, either 0 or 1.

    -
    weights?: Document
    wildcardProjection?: Document
    willRetryWrite?: boolean
    +
    weights?: Document
    wildcardProjection?: Document
    willRetryWrite?: boolean
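A minimal sketch of how a few of these options are passed to Collection.createIndex(); it assumes collection is an already-connected Collection, and the field and index names are illustrative rather than taken from these docs.

// TTL index: expire documents 24 hours after their createdAt date.
await collection.createIndex({ createdAt: 1 }, { expireAfterSeconds: 86_400 });

// Partial unique index, waiting for a majority of data-bearing members
// (commitQuorum) before the primary marks it ready.
await collection.createIndex(
  { email: 1 },
  {
    unique: true,
    partialFilterExpression: { email: { $exists: true } },
    name: 'email_unique_partial',
    commitQuorum: 'majority'
  }
);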
    diff --git a/docs/Next/interfaces/CursorStreamOptions.html b/docs/Next/interfaces/CursorStreamOptions.html index 7b780d44213..a6912d12a44 100644 --- a/docs/Next/interfaces/CursorStreamOptions.html +++ b/docs/Next/interfaces/CursorStreamOptions.html @@ -1,3 +1,3 @@ -CursorStreamOptions | mongodb

    Interface CursorStreamOptions

    interface CursorStreamOptions {
        transform?(this: void, doc: Document): Document;
    }

    Methods

    transform? +CursorStreamOptions | mongodb

    Interface CursorStreamOptions

    interface CursorStreamOptions {
        transform?(this: void, doc: Document): Document;
    }

    Methods

    Methods

    +

    Parameters

    Returns Document
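A minimal sketch of the transform hook, assuming collection is an already-connected Collection; it reshapes each document before the cursor stream emits it.

const stream = collection.find({ status: 'active' }).stream({
  // runs once per document; must return a Document
  transform: doc => ({ id: doc._id.toHexString(), name: doc.name })
});

for await (const doc of stream) {
  console.log(doc); // { id: '...', name: '...' }
}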

    diff --git a/docs/Next/interfaces/DataKey.html b/docs/Next/interfaces/DataKey.html index bd2f6de154a..b933dc8a9fe 100644 --- a/docs/Next/interfaces/DataKey.html +++ b/docs/Next/interfaces/DataKey.html @@ -1,5 +1,5 @@ DataKey | mongodb

    Interface DataKey

    The schema for a DataKey in the key vault collection.

    -
    interface DataKey {
        _id: UUID;
        creationDate: Date;
        keyAltNames?: string[];
        keyMaterial: Binary;
        masterKey: Document;
        status: number;
        updateDate: Date;
        version?: number;
    }

    Properties

    _id +
    interface DataKey {
        _id: UUID;
        creationDate: Date;
        keyAltNames?: string[];
        keyMaterial: Binary;
        masterKey: Document;
        status: number;
        updateDate: Date;
        version?: number;
    }

    Properties

    _id: UUID
    creationDate: Date
    keyAltNames?: string[]
    keyMaterial: Binary
    masterKey: Document
    status: number
    updateDate: Date
    version?: number
    +

    Properties

    _id: UUID
    creationDate: Date
    keyAltNames?: string[]
    keyMaterial: Binary
    masterKey: Document
    status: number
    updateDate: Date
    version?: number
    diff --git a/docs/Next/interfaces/DbOptions.html b/docs/Next/interfaces/DbOptions.html index ab560a81d02..1fda65f0857 100644 --- a/docs/Next/interfaces/DbOptions.html +++ b/docs/Next/interfaces/DbOptions.html @@ -1,4 +1,4 @@ -DbOptions | mongodb

    Interface DbOptions

    interface DbOptions {
        authSource?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        enableUtf8Validation?: boolean;
        fieldsAsRaw?: Document;
        forceServerObjectId?: boolean;
        ignoreUndefined?: boolean;
        pkFactory?: PkFactory;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcern;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        useBigInt64?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authSource? +DbOptions | mongodb

    Interface DbOptions

    interface DbOptions {
        authSource?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        enableUtf8Validation?: boolean;
        fieldsAsRaw?: Document;
        forceServerObjectId?: boolean;
        ignoreUndefined?: boolean;
        pkFactory?: PkFactory;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcern;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        timeoutMS?: number;
        useBigInt64?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authSource?: string

Specifies the database to authenticate against, if authentication depends on a database other than the one being accessed.

    @@ -51,7 +52,8 @@
    retryWrites?: boolean

    Should retry failed writes

    serializeFunctions?: boolean

    serialize the javascript functions

    false

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    Write Concern as an object

    -
    +
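These options are supplied when obtaining a Db handle from a client. A minimal sketch, with a placeholder connection string and database name:

import { MongoClient, ReadConcern } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // placeholder URI
const db = client.db('reporting', {
  readPreference: 'secondaryPreferred',   // prefer reads from secondaries
  readConcern: new ReadConcern('majority'),
  retryWrites: true,
  timeoutMS: 5_000                        // inherited by operations on this Db
});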
    diff --git a/docs/Next/interfaces/DbStatsOptions.html b/docs/Next/interfaces/DbStatsOptions.html index 08ef948907d..429250a4faa 100644 --- a/docs/Next/interfaces/DbStatsOptions.html +++ b/docs/Next/interfaces/DbStatsOptions.html @@ -1,4 +1,4 @@ -DbStatsOptions | mongodb

    Interface DbStatsOptions

    interface DbStatsOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        scale?: number;
        serializeFunctions?: boolean;
        session?: ClientSession;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +DbStatsOptions | mongodb

    Interface DbStatsOptions

    interface DbStatsOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        scale?: number;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

    allow to specify if there what fields we wish to return as unserialized raw buffer.

    +
    fieldsAsRaw?: Document

    allow to specify if there what fields we wish to return as unserialized raw buffer.

    null

    ignoreUndefined?: boolean

    serialize will not emit undefined fields note that the driver sets this to false

    true

    -
    maxTimeMS?: number
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -58,13 +60,14 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    scale?: number

Divide the returned sizes by the scale value.

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    scale?: number

Divide the returned sizes by the scale value.

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
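Per these docs, these options accompany the database-statistics helper. The sketch below assumes db is an already-connected Db instance and that Db.stats() is available in your driver version.

// Report sizes in kilobytes instead of bytes, bounded to 5 seconds.
const stats = await db.stats({ scale: 1024, timeoutMS: 5_000 });
console.log(stats.dataSize, stats.storageSize);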
    diff --git a/docs/Next/interfaces/DeleteManyModel.html b/docs/Next/interfaces/DeleteManyModel.html index 1845e29d638..25b29c2b2af 100644 --- a/docs/Next/interfaces/DeleteManyModel.html +++ b/docs/Next/interfaces/DeleteManyModel.html @@ -1,7 +1,7 @@ -DeleteManyModel | mongodb

    Interface DeleteManyModel<TSchema>

    interface DeleteManyModel<TSchema> {
        collation?: CollationOptions;
        filter: Filter<TSchema>;
        hint?: Hint;
    }

    Type Parameters

    Properties

    collation? +DeleteManyModel | mongodb

    Interface DeleteManyModel<TSchema>

    interface DeleteManyModel<TSchema> {
        collation?: CollationOptions;
        filter: Filter<TSchema>;
        hint?: Hint;
    }

    Type Parameters

    Properties

    collation?: CollationOptions

    Specifies a collation.

    -
    filter: Filter<TSchema>

    The filter to limit the deleted documents.

    -
    hint?: Hint

    The index to use. If specified, then the query system will only consider plans using the hinted index.

    -
    +
    filter: Filter<TSchema>

    The filter to limit the deleted documents.

    +
    hint?: Hint

    The index to use. If specified, then the query system will only consider plans using the hinted index.

    +
    diff --git a/docs/Next/interfaces/DeleteOneModel.html b/docs/Next/interfaces/DeleteOneModel.html index 4cd0b8cf0c7..a51f24688e9 100644 --- a/docs/Next/interfaces/DeleteOneModel.html +++ b/docs/Next/interfaces/DeleteOneModel.html @@ -1,7 +1,7 @@ -DeleteOneModel | mongodb

    Interface DeleteOneModel<TSchema>

    interface DeleteOneModel<TSchema> {
        collation?: CollationOptions;
        filter: Filter<TSchema>;
        hint?: Hint;
    }

    Type Parameters

    Properties

    collation? +DeleteOneModel | mongodb

    Interface DeleteOneModel<TSchema>

    interface DeleteOneModel<TSchema> {
        collation?: CollationOptions;
        filter: Filter<TSchema>;
        hint?: Hint;
    }

    Type Parameters

    Properties

    collation?: CollationOptions

    Specifies a collation.

    -
    filter: Filter<TSchema>

    The filter to limit the deleted documents.

    -
    hint?: Hint

    The index to use. If specified, then the query system will only consider plans using the hinted index.

    -
    +
    filter: Filter<TSchema>

    The filter to limit the deleted documents.

    +
    hint?: Hint

    The index to use. If specified, then the query system will only consider plans using the hinted index.

    +
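DeleteOneModel and DeleteManyModel describe the delete entries accepted by Collection.bulkWrite(). A minimal sketch, assuming collection is an already-connected Collection and the field names are illustrative:

const result = await collection.bulkWrite([
  // remove a single matching document
  { deleteOne: { filter: { status: 'tombstoned' } } },
  // remove every expired document, forcing the expiresAt index
  { deleteMany: { filter: { expiresAt: { $lt: new Date() } }, hint: { expiresAt: 1 } } }
]);
console.log(result.deletedCount);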
    diff --git a/docs/Next/interfaces/DeleteOptions.html b/docs/Next/interfaces/DeleteOptions.html index ee5dc044955..c04a872de54 100644 --- a/docs/Next/interfaces/DeleteOptions.html +++ b/docs/Next/interfaces/DeleteOptions.html @@ -1,4 +1,4 @@ -DeleteOptions | mongodb

    Interface DeleteOptions

    interface DeleteOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: string | Document;
        ignoreUndefined?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        ordered?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +DeleteOptions | mongodb

    Interface DeleteOptions

    interface DeleteOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: string | Document;
        ignoreUndefined?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        ordered?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Specifies the collation to use for the operation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

    allow to specify if there what fields we wish to return as unserialized raw buffer.

    +
    fieldsAsRaw?: Document

    allow to specify if there what fields we wish to return as unserialized raw buffer.

    null

    hint?: string | Document

Specify that the delete query should only consider plans using the hinted index

    -
    ignoreUndefined?: boolean

    serialize will not emit undefined fields +

    ignoreUndefined?: boolean

    serialize will not emit undefined fields note that the driver sets this to false

    true

    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    -
    maxTimeMS?: number
    noResponse?: boolean
    omitReadPreference?: boolean
    ordered?: boolean

    If true, when an insert fails, don't execute the remaining writes. If false, continue with remaining inserts when one fails.

    -
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    ordered?: boolean

    If true, when an insert fails, don't execute the remaining writes. If false, continue with remaining inserts when one fails.

    +
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -63,12 +65,13 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
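A minimal sketch of a delete that uses let-bound variables (MongoDB 5.0+) together with hint and comment; collection is assumed to be an already-connected Collection, and the field names and values are illustrative.

const result = await collection.deleteMany(
  { $expr: { $lt: ['$qty', '$$minQty'] } },   // $$minQty comes from `let`
  { let: { minQty: 5 }, hint: { qty: 1 }, comment: 'purge low-stock items' }
);
console.log(`${result.deletedCount} documents removed`);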
    diff --git a/docs/Next/interfaces/DeleteResult.html b/docs/Next/interfaces/DeleteResult.html index e49b3a722a8..225e0681c85 100644 --- a/docs/Next/interfaces/DeleteResult.html +++ b/docs/Next/interfaces/DeleteResult.html @@ -1,5 +1,5 @@ -DeleteResult | mongodb

    Interface DeleteResult

    interface DeleteResult {
        acknowledged: boolean;
        deletedCount: number;
    }

    Properties

    acknowledged +DeleteResult | mongodb

    Interface DeleteResult

    interface DeleteResult {
        acknowledged: boolean;
        deletedCount: number;
    }

    Properties

    acknowledged: boolean

    Indicates whether this write result was acknowledged. If not, then all other members of this result will be undefined.

    -
    deletedCount: number

    The number of documents that were deleted

    -
    +
    deletedCount: number

    The number of documents that were deleted

    +
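A short sketch of reading a DeleteResult, assuming collection is an already-connected Collection and the filter is illustrative:

const result = await collection.deleteOne({ sku: 'A-100' });
if (!result.acknowledged) {
  // the write used { w: 0 }, so deletedCount carries no information
} else if (result.deletedCount === 0) {
  // nothing matched the filter
}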
    diff --git a/docs/Next/interfaces/DeleteStatement.html b/docs/Next/interfaces/DeleteStatement.html index f03d0ce48c7..0d7f2650b70 100644 --- a/docs/Next/interfaces/DeleteStatement.html +++ b/docs/Next/interfaces/DeleteStatement.html @@ -1,9 +1,9 @@ -DeleteStatement | mongodb

    Interface DeleteStatement

    interface DeleteStatement {
        collation?: CollationOptions;
        hint?: Hint;
        limit: number;
        q: Document;
    }

    Properties

    collation? +DeleteStatement | mongodb

    Interface DeleteStatement

    interface DeleteStatement {
        collation?: CollationOptions;
        hint?: Hint;
        limit: number;
        q: Document;
    }

    Properties

    Properties

    collation?: CollationOptions

    Specifies the collation to use for the operation.

    -
    hint?: Hint

    A document or string that specifies the index to use to support the query predicate.

    -
    limit: number

    The number of matching documents to delete.

    -

    The query that matches documents to delete.

    -
    +
    hint?: Hint

    A document or string that specifies the index to use to support the query predicate.

    +
    limit: number

    The number of matching documents to delete.

    +

    The query that matches documents to delete.

    +
    diff --git a/docs/Next/interfaces/DropCollectionOptions.html b/docs/Next/interfaces/DropCollectionOptions.html index 2ab3fdf919c..90378272676 100644 --- a/docs/Next/interfaces/DropCollectionOptions.html +++ b/docs/Next/interfaces/DropCollectionOptions.html @@ -1,4 +1,4 @@ -DropCollectionOptions | mongodb

    Interface DropCollectionOptions

    interface DropCollectionOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        encryptedFields?: Document;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +DropCollectionOptions | mongodb

    Interface DropCollectionOptions

    interface DropCollectionOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        encryptedFields?: Document;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    -
    encryptedFields?: Document

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

    allow to specify if there what fields we wish to return as unserialized raw buffer.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    encryptedFields?: Document

    Specifies the verbosity mode for the explain output.

    +
    fieldsAsRaw?: Document

    allow to specify if there what fields we wish to return as unserialized raw buffer.

    null

    ignoreUndefined?: boolean

    serialize will not emit undefined fields note that the driver sets this to false

    true

    -
    maxTimeMS?: number
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -58,12 +60,13 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
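A minimal sketch of dropping a collection with a bounded runtime, assuming db is an already-connected Db instance and the collection name is a placeholder:

const dropped = await db.dropCollection('staging_imports', { timeoutMS: 5_000 });
console.log(dropped); // true once the collection has been removed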
    diff --git a/docs/Next/interfaces/EndSessionOptions.html b/docs/Next/interfaces/EndSessionOptions.html index afe99370813..fc00b05088a 100644 --- a/docs/Next/interfaces/EndSessionOptions.html +++ b/docs/Next/interfaces/EndSessionOptions.html @@ -1,3 +1,5 @@ -EndSessionOptions | mongodb

    Interface EndSessionOptions

    interface EndSessionOptions {
        force?: boolean;
        forceClear?: boolean;
    }

    Properties

    force? +EndSessionOptions | mongodb

    Interface EndSessionOptions

    interface EndSessionOptions {
        force?: boolean;
        forceClear?: boolean;
        timeoutMS?: number;
    }

    Properties

    Properties

    force?: boolean
    forceClear?: boolean
    +timeoutMS? +

    Properties

    force?: boolean
    forceClear?: boolean
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
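A minimal sketch of ending a session with the newly documented timeoutMS option; client and collection are assumed to be an already-connected MongoClient and Collection, and the query is illustrative.

const session = client.startSession();
try {
  await collection.findOne({ sku: 'A-100' }, { session });
} finally {
  // bound how long the driver may spend releasing the server-side session
  await session.endSession({ timeoutMS: 2_000 });
}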
    diff --git a/docs/Next/interfaces/EstimatedDocumentCountOptions.html b/docs/Next/interfaces/EstimatedDocumentCountOptions.html index f5cb4cb1a7c..3ef8cebae1a 100644 --- a/docs/Next/interfaces/EstimatedDocumentCountOptions.html +++ b/docs/Next/interfaces/EstimatedDocumentCountOptions.html @@ -1,4 +1,4 @@ -EstimatedDocumentCountOptions | mongodb

    Interface EstimatedDocumentCountOptions

    interface EstimatedDocumentCountOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +EstimatedDocumentCountOptions | mongodb

    Interface EstimatedDocumentCountOptions

    interface EstimatedDocumentCountOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

    allow to specify if there what fields we wish to return as unserialized raw buffer.

    +
    fieldsAsRaw?: Document

    allow to specify if there what fields we wish to return as unserialized raw buffer.

    null

    ignoreUndefined?: boolean

    serialize will not emit undefined fields note that the driver sets this to false

    true

    maxTimeMS?: number

    The maximum amount of time to allow the operation to run.

    This option is sent only if the caller explicitly provides a value. The default is to not send a value.

    -
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -59,12 +60,13 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
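A minimal sketch, assuming collection is an already-connected Collection; the estimate comes from collection metadata, and maxTimeMS caps the server-side processing time.

const approx = await collection.estimatedDocumentCount({ maxTimeMS: 500 });
console.log(`about ${approx} documents`);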
    diff --git a/docs/Next/interfaces/ExplainCommandOptions.html b/docs/Next/interfaces/ExplainCommandOptions.html index d1800c05e88..c31bac083b7 100644 --- a/docs/Next/interfaces/ExplainCommandOptions.html +++ b/docs/Next/interfaces/ExplainCommandOptions.html @@ -1,5 +1,5 @@ -ExplainCommandOptions | mongodb

    Interface ExplainCommandOptions

    interface ExplainCommandOptions {
        maxTimeMS?: number;
        verbosity: string;
    }

    Properties

    maxTimeMS? +ExplainCommandOptions | mongodb

    Interface ExplainCommandOptions

    interface ExplainCommandOptions {
        maxTimeMS?: number;
        verbosity: string;
    }

    Properties

    maxTimeMS?: number

    The maxTimeMS setting for the command.

    -
    verbosity: string

    The explain verbosity for the command.

    -
    +
    verbosity: string

    The explain verbosity for the command.

    +
    diff --git a/docs/Next/interfaces/ExplainOptions.html b/docs/Next/interfaces/ExplainOptions.html index 441b5fa35f2..c62aca5d3b5 100644 --- a/docs/Next/interfaces/ExplainOptions.html +++ b/docs/Next/interfaces/ExplainOptions.html @@ -10,6 +10,6 @@
// limits the `explain` command to no more than 2 seconds
collection.find({ name: 'john doe' }, {
  explain: {
    verbosity: 'queryPlanner',
    maxTimeMS: 2000
  }
});
    -
    interface ExplainOptions {
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
    }

    Hierarchy (view full)

    Properties

    interface ExplainOptions {
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
    }

    Hierarchy (view full)

    Properties

    Properties

    Specifies the verbosity mode for the explain output.

    -
    +
    diff --git a/docs/Next/interfaces/FindOneAndDeleteOptions.html b/docs/Next/interfaces/FindOneAndDeleteOptions.html index b8628238aa6..569f576b6d9 100644 --- a/docs/Next/interfaces/FindOneAndDeleteOptions.html +++ b/docs/Next/interfaces/FindOneAndDeleteOptions.html @@ -1,4 +1,4 @@ -FindOneAndDeleteOptions | mongodb

    Interface FindOneAndDeleteOptions

    interface FindOneAndDeleteOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Document;
        ignoreUndefined?: boolean;
        includeResultMetadata?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        projection?: Document;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        sort?: Sort;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +FindOneAndDeleteOptions | mongodb

    Interface FindOneAndDeleteOptions

    interface FindOneAndDeleteOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Document;
        ignoreUndefined?: boolean;
        includeResultMetadata?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        projection?: Document;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        sort?: Sort;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

    allow to specify if there what fields we wish to return as unserialized raw buffer.

    +
    fieldsAsRaw?: Document

    allow to specify if there what fields we wish to return as unserialized raw buffer.

    null

    hint?: Document

    An optional hint for query optimization. See the command reference for more information.

    -
    ignoreUndefined?: boolean

    serialize will not emit undefined fields +

    ignoreUndefined?: boolean

    serialize will not emit undefined fields note that the driver sets this to false

    true

    includeResultMetadata?: boolean

    Return the ModifyResult instead of the modified document. Defaults to false

    -
    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    -
    maxTimeMS?: number
    noResponse?: boolean
    omitReadPreference?: boolean
    projection?: Document

    Limits the fields to return for all matching documents.

    -
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    projection?: Document

    Limits the fields to return for all matching documents.

    +
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -66,13 +68,14 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    sort?: Sort

    Determines which document the operation modifies if the query selects multiple documents.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    sort?: Sort

    Determines which document the operation modifies if the query selects multiple documents.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
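A minimal sketch of findOneAndDelete combining sort, projection, and maxTimeMS; collection is assumed to be an already-connected Collection, and the field names are illustrative.

// Atomically claim and remove the oldest pending job.
const job = await collection.findOneAndDelete(
  { status: 'pending' },
  { sort: { createdAt: 1 }, projection: { payload: 1 }, maxTimeMS: 2_000 }
);
if (job === null) {
  // no pending job matched the filter
}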
    diff --git a/docs/Next/interfaces/FindOneAndReplaceOptions.html b/docs/Next/interfaces/FindOneAndReplaceOptions.html index 2ba4996b95d..9e9da816004 100644 --- a/docs/Next/interfaces/FindOneAndReplaceOptions.html +++ b/docs/Next/interfaces/FindOneAndReplaceOptions.html @@ -1,4 +1,4 @@ -FindOneAndReplaceOptions | mongodb

    Interface FindOneAndReplaceOptions

    interface FindOneAndReplaceOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Document;
        ignoreUndefined?: boolean;
        includeResultMetadata?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        projection?: Document;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        returnDocument?: ReturnDocument;
        serializeFunctions?: boolean;
        session?: ClientSession;
        sort?: Sort;
        upsert?: boolean;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +FindOneAndReplaceOptions | mongodb

    Interface FindOneAndReplaceOptions

    interface FindOneAndReplaceOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Document;
        ignoreUndefined?: boolean;
        includeResultMetadata?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        projection?: Document;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        returnDocument?: ReturnDocument;
        serializeFunctions?: boolean;
        session?: ClientSession;
        sort?: Sort;
        timeoutMS?: number;
        upsert?: boolean;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    bypassDocumentValidation?: boolean

    Allow driver to bypass schema validation.

    -
    checkKeys?: boolean

    the serializer will check if keys are valid.

    +
    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

+
fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    null

    hint?: Document

    An optional hint for query optimization. See the command reference for more information.

    -
    ignoreUndefined?: boolean

Serialize will not emit undefined fields. Note that the driver sets this to false.

+

    ignoreUndefined?: boolean

Serialize will not emit undefined fields. Note that the driver sets this to false.

    true

    includeResultMetadata?: boolean

    Return the ModifyResult instead of the modified document. Defaults to false

    -
    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    -
    maxTimeMS?: number
    noResponse?: boolean
    omitReadPreference?: boolean
    projection?: Document

    Limits the fields to return for all matching documents.

    -
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    projection?: Document

    Limits the fields to return for all matching documents.

    +
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -70,15 +72,16 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    returnDocument?: ReturnDocument

    When set to 'after', returns the updated document rather than the original. The default is 'before'.

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    returnDocument?: ReturnDocument

    When set to 'after', returns the updated document rather than the original. The default is 'before'.

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    sort?: Sort

    Determines which document the operation modifies if the query selects multiple documents.

    -
    upsert?: boolean

    Upsert the document if it does not exist.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    sort?: Sort

    Determines which document the operation modifies if the query selects multiple documents.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    upsert?: boolean

    Upsert the document if it does not exist.

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
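A minimal usage sketch for a few of the options documented above (not part of the generated page); the connection string and the 'app'/'users' namespace are illustrative assumptions.

import { MongoClient, ReturnDocument } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const users = client.db('app').collection('users');          // hypothetical namespace

// Replace the first matching document, creating it if necessary, and get the replacement back.
const replaced = await users.findOneAndReplace(
  { name: 'Ada' },                        // filter
  { name: 'Ada', role: 'admin' },         // replacement document
  {
    returnDocument: ReturnDocument.AFTER, // return the new document instead of the original
    upsert: true,                         // insert the replacement if nothing matches
    timeoutMS: 5_000,                     // throw a timeout error if the operation exceeds 5 seconds
  }
);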
    diff --git a/docs/Next/interfaces/FindOneAndUpdateOptions.html b/docs/Next/interfaces/FindOneAndUpdateOptions.html index 2f063089ccd..2041211d889 100644 --- a/docs/Next/interfaces/FindOneAndUpdateOptions.html +++ b/docs/Next/interfaces/FindOneAndUpdateOptions.html @@ -1,4 +1,4 @@ -FindOneAndUpdateOptions | mongodb

    Interface FindOneAndUpdateOptions

    interface FindOneAndUpdateOptions {
        arrayFilters?: Document[];
        authdb?: string;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Document;
        ignoreUndefined?: boolean;
        includeResultMetadata?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        projection?: Document;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        returnDocument?: ReturnDocument;
        serializeFunctions?: boolean;
        session?: ClientSession;
        sort?: Sort;
        upsert?: boolean;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    arrayFilters? +FindOneAndUpdateOptions | mongodb

    Interface FindOneAndUpdateOptions

    interface FindOneAndUpdateOptions {
        arrayFilters?: Document[];
        authdb?: string;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Document;
        ignoreUndefined?: boolean;
        includeResultMetadata?: boolean;
        let?: Document;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        projection?: Document;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        returnDocument?: ReturnDocument;
        serializeFunctions?: boolean;
        session?: ClientSession;
        sort?: Sort;
        timeoutMS?: number;
        upsert?: boolean;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    arrayFilters?: Document[]

    Optional list of array filters referenced in filtered positional operators

    -
    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +
    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    bypassDocumentValidation?: boolean

    Allow driver to bypass schema validation.

    -
    checkKeys?: boolean

    the serializer will check if keys are valid.

    +
    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

+
fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    null

    hint?: Document

    An optional hint for query optimization. See the command reference for more information.

    -
    ignoreUndefined?: boolean

Serialize will not emit undefined fields. Note that the driver sets this to false.

+

    ignoreUndefined?: boolean

Serialize will not emit undefined fields. Note that the driver sets this to false.

    true

    includeResultMetadata?: boolean

    Return the ModifyResult instead of the modified document. Defaults to false

    -
    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    -
    maxTimeMS?: number
    noResponse?: boolean
    omitReadPreference?: boolean
    projection?: Document

    Limits the fields to return for all matching documents.

    -
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    projection?: Document

    Limits the fields to return for all matching documents.

    +
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -72,15 +74,16 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    returnDocument?: ReturnDocument

    When set to 'after', returns the updated document rather than the original. The default is 'before'.

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    returnDocument?: ReturnDocument

    When set to 'after', returns the updated document rather than the original. The default is 'before'.

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    sort?: Sort

    Determines which document the operation modifies if the query selects multiple documents.

    -
    upsert?: boolean

    Upsert the document if it does not exist.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    sort?: Sort

    Determines which document the operation modifies if the query selects multiple documents.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    upsert?: boolean

    Upsert the document if it does not exist.

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
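A minimal sketch showing arrayFilters together with returnDocument and maxTimeMS (not part of the generated page); the connection string and the 'app'/'orders' namespace are illustrative assumptions.

import { MongoClient, ReturnDocument } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const orders = client.db('app').collection('orders');        // hypothetical namespace

// Increment every still-pending line item on one open order and return the updated document.
const updated = await orders.findOneAndUpdate(
  { status: 'open' },
  { $inc: { 'items.$[pending].quantity': 1 } },
  {
    arrayFilters: [{ 'pending.state': 'pending' }], // referenced by the $[pending] positional operator
    returnDocument: ReturnDocument.AFTER,
    maxTimeMS: 2_000,                               // server-side processing limit in milliseconds
  }
);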
    diff --git a/docs/Next/interfaces/FindOptions.html b/docs/Next/interfaces/FindOptions.html index 4b1361949d2..537ea8716e6 100644 --- a/docs/Next/interfaces/FindOptions.html +++ b/docs/Next/interfaces/FindOptions.html @@ -1,5 +1,5 @@ -FindOptions | mongodb

    Interface FindOptions<TSchema>

    interface FindOptions<TSchema> {
        allowDiskUse?: boolean;
        allowPartialResults?: boolean;
        authdb?: string;
        awaitData?: boolean;
        batchSize?: number;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Hint;
        ignoreUndefined?: boolean;
        let?: Document;
        limit?: number;
        max?: Document;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        min?: Document;
        noCursorTimeout?: boolean;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        oplogReplay?: boolean;
        projection?: Document;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        returnKey?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        showRecordId?: boolean;
        singleBatch?: boolean;
        skip?: number;
        sort?: Sort;
        tailable?: boolean;
        timeout?: boolean;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
    }

    Type Parameters

    • TSchema extends Document = Document

      Unused schema definition, deprecated usage, only specify FindOptions with no generic

      -

    Hierarchy

    Properties

    allowDiskUse? +FindOptions | mongodb

    Interface FindOptions<TSchema>

    interface FindOptions<TSchema> {
        allowDiskUse?: boolean;
        allowPartialResults?: boolean;
        authdb?: string;
        awaitData?: boolean;
        batchSize?: number;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        hint?: Hint;
        ignoreUndefined?: boolean;
        let?: Document;
        limit?: number;
        max?: Document;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        min?: Document;
        noCursorTimeout?: boolean;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        oplogReplay?: boolean;
        projection?: Document;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        returnKey?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        showRecordId?: boolean;
        singleBatch?: boolean;
        skip?: number;
        sort?: Sort;
        tailable?: boolean;
        timeout?: boolean;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
    }

    Type Parameters

    • TSchema extends Document = Document

      Unused schema definition, deprecated usage, only specify FindOptions with no generic

      +

    Hierarchy (view full)

    Properties

    allowDiskUse?: boolean

    Allows disk use for blocking sort operations exceeding 100MB memory. (MongoDB 3.2 or higher)

    -
    allowPartialResults?: boolean

    For queries against a sharded collection, allows the command (or subsequent getMore commands) to return partial results, rather than an error, if one or more queried shards are unavailable.

    -
    authdb?: string
    awaitData?: boolean

    Specify if the cursor is a tailable-await cursor. Requires tailable to be true

    -
    batchSize?: number

    Set the batchSize for the getMoreCommand when iterating over the query results.

    -
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +
    allowPartialResults?: boolean

    For queries against a sharded collection, allows the command (or subsequent getMore commands) to return partial results, rather than an error, if one or more queried shards are unavailable.

    +
    authdb?: string
    awaitData?: boolean

    Specify if the cursor is a tailable-await cursor. Requires tailable to be true

    +
    batchSize?: number

    Set the batchSize for the getMoreCommand when iterating over the query results.

    +
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    -
    checkKeys?: boolean

    the serializer will check if keys are valid.

    +
    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    -
    collation?: CollationOptions

    Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    collation?: CollationOptions

    Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    -

    Specifies the verbosity mode for the explain output.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +

    Specifies the verbosity mode for the explain output.

    This API is deprecated in favor of collection.find().explain().

    -
    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

+
fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    null

    -
    hint?: Hint

    Tell the query to use specific indexes in the query. Object of indexes to use, {'_id':1}

    -
    ignoreUndefined?: boolean

Serialize will not emit undefined fields. Note that the driver sets this to false.

+

    hint?: Hint

    Tell the query to use specific indexes in the query. Object of indexes to use, {'_id':1}

    +
    ignoreUndefined?: boolean

Serialize will not emit undefined fields. Note that the driver sets this to false.

    true

    -
    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    -
    limit?: number

    Sets the limit of documents returned in the query.

    -
    max?: Document

    The exclusive upper bound for a specific index

    -
    maxAwaitTimeMS?: number

    The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. Requires tailable and awaitData to be true

    -
    maxTimeMS?: number

    Number of milliseconds to wait before aborting the query.

    -
    min?: Document

    The inclusive lower bound for a specific index

    -
    noCursorTimeout?: boolean

    The server normally times out idle cursors after an inactivity period (10 minutes) to prevent excess memory use. Set this option to prevent that.

    -
    noResponse?: boolean
    omitReadPreference?: boolean
    oplogReplay?: boolean

    Option to enable an optimized code path for queries looking for a particular range of ts values in the oplog. Requires tailable to be true.

    +
    let?: Document

    Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0).

    +
    limit?: number

    Sets the limit of documents returned in the query.

    +
    max?: Document

    The exclusive upper bound for a specific index

    +
    maxAwaitTimeMS?: number

    The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. Requires tailable and awaitData to be true

    +
    maxTimeMS?: number

    Number of milliseconds to wait before aborting the query.

    +
    min?: Document

    The inclusive lower bound for a specific index

    +
    noCursorTimeout?: boolean

    The server normally times out idle cursors after an inactivity period (10 minutes) to prevent excess memory use. Set this option to prevent that.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    oplogReplay?: boolean

    Option to enable an optimized code path for queries looking for a particular range of ts values in the oplog. Requires tailable to be true.

    Starting from MongoDB 4.4 this flag is not needed and will be ignored.

    -
    projection?: Document

    The fields to return in the query. Object of fields to either include or exclude (one of, not both), {'a':1, 'b': 1} or {'a': 0, 'b': 0}

    -
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    projection?: Document

    The fields to return in the query. Object of fields to either include or exclude (one of, not both), {'a':1, 'b': 1} or {'a': 0, 'b': 0}

    +
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    -
    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    +
    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    -
    promoteValues?: boolean

    when deserializing will promote BSON values to their Node.js closest equivalent types.

    +
    promoteValues?: boolean

    when deserializing will promote BSON values to their Node.js closest equivalent types.

    true

    -
    raw?: boolean

Enabling the raw option will return a Node.js Buffer which is allocated using the allocUnsafe API.

+
raw?: boolean

Enabling the raw option will return a Node.js Buffer which is allocated using the allocUnsafe API. See the Node.js documentation for more detail about what "unsafe" refers to in this context.

@@ -92,19 +93,20 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    -
    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    returnKey?: boolean

    If true, returns only the index keys in the resulting documents.

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    returnKey?: boolean

    If true, returns only the index keys in the resulting documents.

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    -
    session?: ClientSession

    Specify ClientSession for this command

    -
    showRecordId?: boolean

    Determines whether to return the record identifier for each document. If true, adds a field $recordId to the returned documents.

    -
    singleBatch?: boolean

    Determines whether to close the cursor after the first batch. Defaults to false.

    -
    skip?: number

    Set to skip N documents ahead in your query (useful for pagination).

    -
    sort?: Sort

    Set to sort the documents coming back from the query. Array of indexes, [['a', 1]] etc.

    -
    tailable?: boolean

    Specify if the cursor is tailable.

    -
    timeout?: boolean

    Specify if the cursor can timeout.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    session?: ClientSession

    Specify ClientSession for this command

    +
    showRecordId?: boolean

    Determines whether to return the record identifier for each document. If true, adds a field $recordId to the returned documents.

    +
    singleBatch?: boolean

    Determines whether to close the cursor after the first batch. Defaults to false.

    +
    skip?: number

    Set to skip N documents ahead in your query (useful for pagination).

    +
    sort?: Sort

    Set to sort the documents coming back from the query. Array of indexes, [['a', 1]] etc.

    +
    tailable?: boolean

    Specify if the cursor is tailable.

    +
    timeout?: boolean

    Specify if the cursor can timeout.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean
    +
    willRetryWrite?: boolean
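A minimal sketch combining a few of the query options above (not part of the generated page); the connection string and the 'app'/'movies' namespace are illustrative assumptions.

import { MongoClient } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const movies = client.db('app').collection('movies');        // hypothetical namespace

const cursor = movies.find(
  { year: { $gte: 2000 } },
  {
    projection: { title: 1, year: 1 }, // include-only projection
    sort: { year: -1 },                // newest first
    limit: 10,
    batchSize: 5,                      // documents per getMore response
    timeoutMS: 1_000,                  // throw a timeout error if the operation exceeds this budget
  }
);
const results = await cursor.toArray();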
    diff --git a/docs/Next/interfaces/GCPEncryptionKeyOptions.html b/docs/Next/interfaces/GCPEncryptionKeyOptions.html index 1de4bffd3d7..9950a911ba7 100644 --- a/docs/Next/interfaces/GCPEncryptionKeyOptions.html +++ b/docs/Next/interfaces/GCPEncryptionKeyOptions.html @@ -1,14 +1,14 @@ GCPEncryptionKeyOptions | mongodb

    Interface GCPEncryptionKeyOptions

Configuration options for making a GCP encryption key

    -
    interface GCPEncryptionKeyOptions {
        endpoint?: string;
        keyName: string;
        keyRing: string;
        keyVersion?: string;
        location: string;
        projectId: string;
    }

    Properties

    interface GCPEncryptionKeyOptions {
        endpoint?: string;
        keyName: string;
        keyRing: string;
        keyVersion?: string;
        location: string;
        projectId: string;
    }

    Properties

    endpoint?: string

    KMS URL, defaults to https://www.googleapis.com/auth/cloudkms

    -
    keyName: string

    Key name

    -
    keyRing: string

    Key ring name

    -
    keyVersion?: string

    Key version

    -
    location: string

    Location name (e.g. "global")

    -
    projectId: string

    GCP project ID

    -
    +
    keyName: string

    Key name

    +
    keyRing: string

    Key ring name

    +
    keyVersion?: string

    Key version

    +
    location: string

    Location name (e.g. "global")

    +
    projectId: string

    GCP project ID

    +
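A sketch of how these fields might be passed as the masterKey when creating a GCP data key (not part of the generated page); the connection string, key vault namespace, credentials, and key names are all illustrative assumptions.

import { MongoClient, ClientEncryption } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const clientEncryption = new ClientEncryption(client, {
  keyVaultNamespace: 'encryption.__keyVault',                // hypothetical key vault collection
  kmsProviders: {
    gcp: { email: 'svc@example.iam.gserviceaccount.com', privateKey: '<base64 key>' }, // placeholder credentials
  },
});

// The masterKey document carries the fields documented above.
const dataKeyId = await clientEncryption.createDataKey('gcp', {
  masterKey: {
    projectId: 'my-project', // GCP project ID
    location: 'global',      // location name
    keyRing: 'my-key-ring',  // key ring name
    keyName: 'my-key',       // key name
    // keyVersion and endpoint are optional and omitted here
  },
});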
    diff --git a/docs/Next/interfaces/GridFSBucketOptions.html b/docs/Next/interfaces/GridFSBucketOptions.html index 8c6c77a3492..f3c03888942 100644 --- a/docs/Next/interfaces/GridFSBucketOptions.html +++ b/docs/Next/interfaces/GridFSBucketOptions.html @@ -1,9 +1,12 @@ -GridFSBucketOptions | mongodb

    Interface GridFSBucketOptions

    interface GridFSBucketOptions {
        bucketName?: string;
        chunkSizeBytes?: number;
        readPreference?: ReadPreference;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    bucketName? +GridFSBucketOptions | mongodb

    Interface GridFSBucketOptions

    interface GridFSBucketOptions {
        bucketName?: string;
        chunkSizeBytes?: number;
        readPreference?: ReadPreference;
        timeoutMS?: number;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    bucketName?: string

    The 'files' and 'chunks' collections will be prefixed with the bucket name followed by a dot.

    -
    chunkSizeBytes?: number

    Number of bytes stored in each chunk. Defaults to 255KB

    -
    readPreference?: ReadPreference

    Read preference to be passed to read operations

    -

    Write Concern as an object

    -
    +
    chunkSizeBytes?: number

    Number of bytes stored in each chunk. Defaults to 255KB

    +
    readPreference?: ReadPreference

    Read preference to be passed to read operations

    +
    timeoutMS?: number

Specifies the lifetime duration of a GridFS stream. If any async operations are in progress when this timeout expires, the stream will throw a timeout error.

    +

    Write Concern as an object

    +
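A minimal sketch of constructing a bucket with these options (not part of the generated page); the connection string and the 'files'/'attachments' names are illustrative assumptions.

import { MongoClient, GridFSBucket } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const db = client.db('files');                               // hypothetical database

// Bucket-level options apply to every stream the bucket creates unless overridden per stream.
const bucket = new GridFSBucket(db, {
  bucketName: 'attachments',       // backing collections become attachments.files and attachments.chunks
  chunkSizeBytes: 255 * 1024,      // chunk size used for uploads from this bucket
  writeConcern: { w: 'majority' },
});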
    diff --git a/docs/Next/interfaces/GridFSBucketReadStreamOptions.html b/docs/Next/interfaces/GridFSBucketReadStreamOptions.html index 8b25c22a7d0..9cfc74d4c0e 100644 --- a/docs/Next/interfaces/GridFSBucketReadStreamOptions.html +++ b/docs/Next/interfaces/GridFSBucketReadStreamOptions.html @@ -1,8 +1,10 @@ -GridFSBucketReadStreamOptions | mongodb

    Interface GridFSBucketReadStreamOptions

    interface GridFSBucketReadStreamOptions {
        end?: number;
        skip?: number;
        sort?: Sort;
        start?: number;
    }

    Hierarchy (view full)

    Properties

    end? +GridFSBucketReadStreamOptions | mongodb

    Interface GridFSBucketReadStreamOptions

    interface GridFSBucketReadStreamOptions {
        end?: number;
        skip?: number;
        sort?: Sort;
        start?: number;
        timeoutMS?: number;
    }

    Hierarchy (view full)

    Properties

    end?: number

    0-indexed non-negative byte offset to the end of the file contents to be returned by the stream. end is non-inclusive

    -
    skip?: number
    sort?: Sort
    start?: number

    0-indexed non-negative byte offset from the beginning of the file

    -
    +
    skip?: number
    sort?: Sort
    start?: number

    0-indexed non-negative byte offset from the beginning of the file

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
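A sketch of a ranged download using start and end (not part of the generated page); the connection string, database name, and stored filename are illustrative assumptions.

import { MongoClient, GridFSBucket } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const bucket = new GridFSBucket(client.db('files'));         // hypothetical database

const file = await bucket.find({ filename: 'report.pdf' }).next(); // hypothetical stored file
if (file) {
  // Stream only the first kilobyte: start is inclusive, end is exclusive.
  bucket.openDownloadStream(file._id, { start: 0, end: 1024 }).pipe(process.stdout);
}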
    diff --git a/docs/Next/interfaces/GridFSBucketReadStreamOptionsWithRevision.html b/docs/Next/interfaces/GridFSBucketReadStreamOptionsWithRevision.html index 86170aafc80..54d4fbe26be 100644 --- a/docs/Next/interfaces/GridFSBucketReadStreamOptionsWithRevision.html +++ b/docs/Next/interfaces/GridFSBucketReadStreamOptionsWithRevision.html @@ -1,12 +1,14 @@ -GridFSBucketReadStreamOptionsWithRevision | mongodb

    Interface GridFSBucketReadStreamOptionsWithRevision

    interface GridFSBucketReadStreamOptionsWithRevision {
        end?: number;
        revision?: number;
        skip?: number;
        sort?: Sort;
        start?: number;
    }

    Hierarchy (view full)

    Properties

    end? +GridFSBucketReadStreamOptionsWithRevision | mongodb

    Interface GridFSBucketReadStreamOptionsWithRevision

    interface GridFSBucketReadStreamOptionsWithRevision {
        end?: number;
        revision?: number;
        skip?: number;
        sort?: Sort;
        start?: number;
        timeoutMS?: number;
    }

    Hierarchy (view full)

    Properties

    end?: number

    0-indexed non-negative byte offset to the end of the file contents to be returned by the stream. end is non-inclusive

    -
    revision?: number

The revision number relative to the oldest file with the given filename. 0 gets you the oldest file, 1 gets you the 2nd oldest, -1 gets you the newest.

+

    revision?: number

    The revision number relative to the oldest file with the given filename. 0 gets you the oldest file, 1 gets you the 2nd oldest, -1 gets you the newest.

    -
    skip?: number
    sort?: Sort
    start?: number

    0-indexed non-negative byte offset from the beginning of the file

    -
    +
    skip?: number
    sort?: Sort
    start?: number

    0-indexed non-negative byte offset from the beginning of the file

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
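A sketch of downloading by filename with a revision selector (not part of the generated page); the connection string, database name, and filename are illustrative assumptions.

import { MongoClient, GridFSBucket } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const bucket = new GridFSBucket(client.db('files'));         // hypothetical database

// revision: -1 selects the newest file stored under this name; 0 would select the oldest.
bucket
  .openDownloadStreamByName('report.pdf', { revision: -1 })
  .pipe(process.stdout);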
    diff --git a/docs/Next/interfaces/GridFSBucketWriteStreamOptions.html b/docs/Next/interfaces/GridFSBucketWriteStreamOptions.html index 4e9b6341a95..d75816b0f4b 100644 --- a/docs/Next/interfaces/GridFSBucketWriteStreamOptions.html +++ b/docs/Next/interfaces/GridFSBucketWriteStreamOptions.html @@ -1,15 +1,17 @@ -GridFSBucketWriteStreamOptions | mongodb

    Interface GridFSBucketWriteStreamOptions

    interface GridFSBucketWriteStreamOptions {
        aliases?: string[];
        chunkSizeBytes?: number;
        contentType?: string;
        id?: ObjectId;
        metadata?: Document;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    aliases? +GridFSBucketWriteStreamOptions | mongodb

    Interface GridFSBucketWriteStreamOptions

    interface GridFSBucketWriteStreamOptions {
        aliases?: string[];
        chunkSizeBytes?: number;
        contentType?: string;
        id?: ObjectId;
        metadata?: Document;
        timeoutMS?: number;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    aliases?: string[]

    Array of strings to store in the file document's aliases field.

    Will be removed in the next major version. Add an aliases field to the metadata document instead.

    -
    chunkSizeBytes?: number

    Overwrite this bucket's chunkSizeBytes for this file

    -
    contentType?: string

    String to store in the file document's contentType field.

    +
    chunkSizeBytes?: number

    Overwrite this bucket's chunkSizeBytes for this file

    +
    contentType?: string

    String to store in the file document's contentType field.

    Will be removed in the next major version. Add a contentType field to the metadata document instead.

    -

    Custom file id for the GridFS file.

    -
    metadata?: Document

    Object to store in the file document's metadata field

    -

    Write Concern as an object

    -
    +

    Custom file id for the GridFS file.

    +
    metadata?: Document

    Object to store in the file document's metadata field

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +

    Write Concern as an object

    +
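A sketch of an upload stream using these per-file options (not part of the generated page); the connection string, database name, filename, and local path are illustrative assumptions.

import { createReadStream } from 'node:fs';
import { MongoClient, GridFSBucket } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const bucket = new GridFSBucket(client.db('files'));         // hypothetical database

// Per-file options override the bucket defaults; arbitrary metadata lives on the file document.
const upload = bucket.openUploadStream('report.pdf', {
  chunkSizeBytes: 64 * 1024,
  metadata: { contentType: 'application/pdf', owner: 'ada' }, // preferred over the deprecated contentType option
});
createReadStream('./report.pdf').pipe(upload);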
    diff --git a/docs/Next/interfaces/GridFSChunk.html b/docs/Next/interfaces/GridFSChunk.html index b5ef3433fb2..3d6ab9d7f1e 100644 --- a/docs/Next/interfaces/GridFSChunk.html +++ b/docs/Next/interfaces/GridFSChunk.html @@ -1,5 +1,5 @@ -GridFSChunk | mongodb

    Interface GridFSChunk

    interface GridFSChunk {
        _id: ObjectId;
        data: Uint8Array | Buffer;
        files_id: ObjectId;
        n: number;
    }

    Properties

    _id +GridFSChunk | mongodb

    Interface GridFSChunk

    interface GridFSChunk {
        _id: ObjectId;
        data: Uint8Array | Buffer;
        files_id: ObjectId;
        n: number;
    }

    Properties

    Properties

    data: Uint8Array | Buffer
    files_id: ObjectId
    n: number
    +

    Properties

    data: Uint8Array | Buffer
    files_id: ObjectId
    n: number
    diff --git a/docs/Next/interfaces/GridFSFile.html b/docs/Next/interfaces/GridFSFile.html index cf5d6e77f98..9ffaa47f6dc 100644 --- a/docs/Next/interfaces/GridFSFile.html +++ b/docs/Next/interfaces/GridFSFile.html @@ -1,4 +1,4 @@ -GridFSFile | mongodb

    Interface GridFSFile

    interface GridFSFile {
        _id: ObjectId;
        aliases?: string[];
        chunkSize: number;
        contentType?: string;
        filename: string;
        length: number;
        metadata?: Document;
        uploadDate: Date;
    }

    Properties

    _id +GridFSFile | mongodb

    Interface GridFSFile

    interface GridFSFile {
        _id: ObjectId;
        aliases?: string[];
        chunkSize: number;
        contentType?: string;
        filename: string;
        length: number;
        metadata?: Document;
        uploadDate: Date;
    }

    Properties

    aliases?: string[]

    Will be removed in the next major version.

    -
    chunkSize: number
    contentType?: string

    Will be removed in the next major version.

    -
    filename: string
    length: number
    metadata?: Document
    uploadDate: Date
    +

    Properties

    aliases?: string[]

    Will be removed in the next major version.

    +
    chunkSize: number
    contentType?: string

    Will be removed in the next major version.

    +
    filename: string
    length: number
    metadata?: Document
    uploadDate: Date
    diff --git a/docs/Next/interfaces/IndexDescription.html b/docs/Next/interfaces/IndexDescription.html index 03f68abd2fc..476da9d4de3 100644 --- a/docs/Next/interfaces/IndexDescription.html +++ b/docs/Next/interfaces/IndexDescription.html @@ -1,4 +1,4 @@ -IndexDescription | mongodb

    Interface IndexDescription

    interface IndexDescription {
        2dsphereIndexVersion?: number;
        background?: boolean;
        bits?: number;
        bucketSize?: number;
        collation?: CollationOptions;
        default_language?: string;
        expireAfterSeconds?: number;
        hidden?: boolean;
        key: {
            [key: string]: IndexDirection;
        } | Map<string, IndexDirection>;
        language_override?: string;
        max?: number;
        min?: number;
        name?: string;
        partialFilterExpression?: Document;
        sparse?: boolean;
        storageEngine?: Document;
        textIndexVersion?: number;
        unique?: boolean;
        version?: number;
        weights?: Document;
        wildcardProjection?: Document;
    }

    Hierarchy

    • Pick<CreateIndexesOptions,
          | "background"
          | "unique"
          | "partialFilterExpression"
          | "sparse"
          | "hidden"
          | "expireAfterSeconds"
          | "storageEngine"
          | "version"
          | "weights"
          | "default_language"
          | "language_override"
          | "textIndexVersion"
          | "2dsphereIndexVersion"
          | "bits"
          | "min"
          | "max"
          | "bucketSize"
          | "wildcardProjection">
      • IndexDescription

    Properties

    2dsphereIndexVersion? +IndexDescription | mongodb

    Interface IndexDescription

    interface IndexDescription {
        2dsphereIndexVersion?: number;
        background?: boolean;
        bits?: number;
        bucketSize?: number;
        collation?: CollationOptions;
        default_language?: string;
        expireAfterSeconds?: number;
        hidden?: boolean;
        key: {
            [key: string]: IndexDirection;
        } | Map<string, IndexDirection>;
        language_override?: string;
        max?: number;
        min?: number;
        name?: string;
        partialFilterExpression?: Document;
        sparse?: boolean;
        storageEngine?: Document;
        textIndexVersion?: number;
        unique?: boolean;
        version?: number;
        weights?: Document;
        wildcardProjection?: Document;
    }

    Hierarchy

    • Pick<CreateIndexesOptions,
          | "background"
          | "unique"
          | "partialFilterExpression"
          | "sparse"
          | "hidden"
          | "expireAfterSeconds"
          | "storageEngine"
          | "version"
          | "weights"
          | "default_language"
          | "language_override"
          | "textIndexVersion"
          | "2dsphereIndexVersion"
          | "bits"
          | "min"
          | "max"
          | "bucketSize"
          | "wildcardProjection">
      • IndexDescription

    Properties

    2dsphereIndexVersion?: number
    background?: boolean

    Creates the index in the background, yielding whenever possible.

    -
    bits?: number
    bucketSize?: number
    collation?: CollationOptions
    default_language?: string
    expireAfterSeconds?: number

Allows you to expire data on indexes applied to a date field (MongoDB 2.2 or higher)

    -
    hidden?: boolean

    Specifies that the index should exist on the target collection but should not be used by the query planner when executing operations. (MongoDB 4.4 or higher)

    -
    key: {
        [key: string]: IndexDirection;
    } | Map<string, IndexDirection>
    language_override?: string
    max?: number

    For geospatial indexes set the high bound for the co-ordinates.

    -
    min?: number

    For geospatial indexes set the lower bound for the co-ordinates.

    -
    name?: string
    partialFilterExpression?: Document

    Creates a partial index based on the given filter object (MongoDB 3.2 or higher)

    -
    sparse?: boolean

    Creates a sparse index.

    -
    storageEngine?: Document

    Allows users to configure the storage engine on a per-index basis when creating an index. (MongoDB 3.0 or higher)

    -
    textIndexVersion?: number
    unique?: boolean

Creates a unique index.

    -
    version?: number

    Specifies the index version number, either 0 or 1.

    -
    weights?: Document
    wildcardProjection?: Document
    +

    Properties

    2dsphereIndexVersion?: number
    background?: boolean

    Creates the index in the background, yielding whenever possible.

    +
    bits?: number
    bucketSize?: number
    collation?: CollationOptions
    default_language?: string
    expireAfterSeconds?: number

Allows you to expire data on indexes applied to a date field (MongoDB 2.2 or higher)

    +
    hidden?: boolean

    Specifies that the index should exist on the target collection but should not be used by the query planner when executing operations. (MongoDB 4.4 or higher)

    +
    key: {
        [key: string]: IndexDirection;
    } | Map<string, IndexDirection>
    language_override?: string
    max?: number

    For geospatial indexes set the high bound for the co-ordinates.

    +
    min?: number

    For geospatial indexes set the lower bound for the co-ordinates.

    +
    name?: string
    partialFilterExpression?: Document

    Creates a partial index based on the given filter object (MongoDB 3.2 or higher)

    +
    sparse?: boolean

    Creates a sparse index.

    +
    storageEngine?: Document

    Allows users to configure the storage engine on a per-index basis when creating an index. (MongoDB 3.0 or higher)

    +
    textIndexVersion?: number
    unique?: boolean

Creates a unique index.

    +
    version?: number

    Specifies the index version number, either 0 or 1.

    +
    weights?: Document
    wildcardProjection?: Document
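A sketch of passing IndexDescription documents to createIndexes (not part of the generated page); the connection string, namespace, and field names are illustrative assumptions.

import { MongoClient } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const users = client.db('app').collection('users');          // hypothetical namespace

// Each element is an IndexDescription; only `key` is required, everything else is optional.
const indexNames = await users.createIndexes([
  { key: { email: 1 }, unique: true },
  { key: { createdAt: 1 }, expireAfterSeconds: 60 * 60 * 24 * 30 }, // TTL index on a date field
  { key: { 'profile.bio': 1 }, sparse: true, name: 'profile_bio_sparse' },
]);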
    diff --git a/docs/Next/interfaces/IndexInformationOptions.html b/docs/Next/interfaces/IndexInformationOptions.html index 56f45dbfa67..5210ee0f244 100644 --- a/docs/Next/interfaces/IndexInformationOptions.html +++ b/docs/Next/interfaces/IndexInformationOptions.html @@ -1,4 +1,4 @@ -IndexInformationOptions | mongodb

    Interface IndexInformationOptions

    interface IndexInformationOptions {
        awaitData?: boolean;
        batchSize?: number;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        comment?: unknown;
        enableUtf8Validation?: boolean;
        fieldsAsRaw?: Document;
        full?: boolean;
        ignoreUndefined?: boolean;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noCursorTimeout?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        serializeFunctions?: boolean;
        session?: ClientSession;
        tailable?: boolean;
        useBigInt64?: boolean;
    }

    Hierarchy (view full)

    Properties

    awaitData? +IndexInformationOptions | mongodb

    Interface IndexInformationOptions

    interface IndexInformationOptions {
        awaitData?: boolean;
        batchSize?: number;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        comment?: unknown;
        enableUtf8Validation?: boolean;
        fieldsAsRaw?: Document;
        full?: boolean;
        ignoreUndefined?: boolean;
        maxAwaitTimeMS?: number;
        maxTimeMS?: number;
        noCursorTimeout?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        serializeFunctions?: boolean;
        session?: ClientSession;
        tailable?: boolean;
        timeoutMode?: CursorTimeoutMode;
        timeoutMS?: number;
        useBigInt64?: boolean;
    }

    Hierarchy (view full)

    Properties

    awaitData?: boolean

    If awaitData is set to true, when the cursor reaches the end of the capped collection, MongoDB blocks the query thread for a period of time waiting for new data to arrive. When new data is inserted into the capped collection, the blocked thread is signaled to wake up and return the next batch to the client.

    -
    batchSize?: number

    Specifies the number of documents to return in each response from MongoDB

    -
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +
    batchSize?: number

    Specifies the number of documents to return in each response from MongoDB

    +
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    @@ -33,7 +35,7 @@

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    fieldsAsRaw?: Document

Allows specifying which fields should be returned as unserialized raw buffers.

    null

    full?: boolean

When true, an array of index descriptions is returned.

@@ -47,14 +49,14 @@

    {
    'a_1': [['a', 1]],
    'b_1_c_1': [['b', 1], ['c', 1]],
    }
    -
    ignoreUndefined?: boolean

Serialize will not emit undefined fields. Note that the driver sets this to false.

+

    ignoreUndefined?: boolean

Serialize will not emit undefined fields. Note that the driver sets this to false.

    true

    maxAwaitTimeMS?: number

    When applicable maxAwaitTimeMS controls the amount of time subsequent getMores that a cursor uses to fetch more data should take. (ex. cursor.next())

    -
    maxTimeMS?: number

When applicable maxTimeMS controls the amount of time the initial command that constructs a cursor should take. (ex. find, aggregate, listCollections)

+

    maxTimeMS?: number

    When applicable maxTimeMS controls the amount of time the initial command that constructs a cursor should take. (ex. find, aggregate, listCollections)

    -
    noCursorTimeout?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    noCursorTimeout?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -71,12 +73,27 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does correctly work at Db, Collection, and per operation the same as other BSON options work.

    -
    readConcern?: ReadConcernLike
    readPreference?: ReadPreferenceLike
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readConcern?: ReadConcernLike
    readPreference?: ReadPreferenceLike
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    -
    session?: ClientSession
    tailable?: boolean

By default, MongoDB will automatically close a cursor when the client has exhausted all results in the cursor. However, for capped collections you may use a Tailable Cursor that remains open after the client exhausts the results in the initial cursor.

+

    session?: ClientSession
    tailable?: boolean

    By default, MongoDB will automatically close a cursor when the client has exhausted all results in the cursor. However, for capped collections you may use a Tailable Cursor that remains open after the client exhausts the results in the initial cursor.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMode?: CursorTimeoutMode

Specifies how timeoutMS is applied to the cursor. Can be either 'cursorLifetime' or 'iteration'. When set to 'iteration', the deadline specified by timeoutMS applies to each call of cursor.next(). When set to 'cursorLifetime', the deadline applies to the life of the entire cursor.

Depending on the type of cursor being used, this option has different default values. For non-tailable cursors, this value defaults to 'cursorLifetime'. For tailable cursors, this value defaults to 'iteration', since tailable cursors, by definition, can have an arbitrarily long lifetime.

const cursor = collection.find({}, { timeoutMS: 100, timeoutMode: 'iteration' });
for await (const doc of cursor) {
  // process doc
  // This will throw a timeout error if any of the iterator's `next()` calls takes more than 100ms,
  // but will continue to iterate successfully otherwise, regardless of the number of batches.
}

const cursor = collection.find({}, { timeoutMS: 1000, timeoutMode: 'cursorLifetime' });
const docs = await cursor.toArray(); // This entire line will throw a timeout error if all batches are not fetched and returned within 1000ms.

+
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error. See AbstractCursorOptions.timeoutMode for more details on how this option applies to cursors.

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    +
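A sketch of the two result shapes controlled by the full option (not part of the generated page); the connection string and the 'app'/'users' namespace are illustrative assumptions.

import { MongoClient } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const users = client.db('app').collection('users');          // hypothetical namespace

// full: true returns the server's index description documents;
// the default returns the legacy name-to-key mapping shown above.
const descriptions = await users.indexInformation({ full: true });
const mapping = await users.indexInformation({ full: false });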
    diff --git a/docs/Next/interfaces/InsertManyResult.html b/docs/Next/interfaces/InsertManyResult.html index 145e7174a56..fb552b7fd7b 100644 --- a/docs/Next/interfaces/InsertManyResult.html +++ b/docs/Next/interfaces/InsertManyResult.html @@ -1,7 +1,7 @@ -InsertManyResult | mongodb

    Interface InsertManyResult<TSchema>

    interface InsertManyResult<TSchema> {
        acknowledged: boolean;
        insertedCount: number;
        insertedIds: {
            [key: number]: InferIdType<TSchema>;
        };
    }

    Type Parameters

    Properties

    acknowledged +InsertManyResult | mongodb

    Interface InsertManyResult<TSchema>

    interface InsertManyResult<TSchema> {
        acknowledged: boolean;
        insertedCount: number;
        insertedIds: {
            [key: number]: InferIdType<TSchema>;
        };
    }

    Type Parameters

    Properties

    acknowledged: boolean

    Indicates whether this write result was acknowledged. If not, then all other members of this result will be undefined

    -
    insertedCount: number

The number of inserted documents for this operation

    -
    insertedIds: {
        [key: number]: InferIdType<TSchema>;
    }

    Map of the index of the inserted document to the id of the inserted document

    -
    +
    insertedCount: number

The number of inserted documents for this operation

    +
    insertedIds: {
        [key: number]: InferIdType<TSchema>;
    }

    Map of the index of the inserted document to the id of the inserted document

    +
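A sketch of reading an InsertManyResult (not part of the generated page); the connection string and the 'app'/'users' namespace are illustrative assumptions.

import { MongoClient } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const users = client.db('app').collection('users');          // hypothetical namespace

const result = await users.insertMany([{ name: 'Ada' }, { name: 'Grace' }]);
if (result.acknowledged) {
  console.log(result.insertedCount);  // 2
  console.log(result.insertedIds[0]); // _id assigned to the first document
}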
    diff --git a/docs/Next/interfaces/InsertOneModel.html b/docs/Next/interfaces/InsertOneModel.html index 69ed23d8693..f96323d902a 100644 --- a/docs/Next/interfaces/InsertOneModel.html +++ b/docs/Next/interfaces/InsertOneModel.html @@ -1,3 +1,3 @@ -InsertOneModel | mongodb

    Interface InsertOneModel<TSchema>

    interface InsertOneModel<TSchema> {
        document: OptionalId<TSchema>;
    }

    Type Parameters

    Properties

    document +InsertOneModel | mongodb

    Interface InsertOneModel<TSchema>

    interface InsertOneModel<TSchema> {
        document: OptionalId<TSchema>;
    }

    Type Parameters

    Properties

    Properties

    document: OptionalId<TSchema>

    The document to insert.

    -
    +
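A sketch of where InsertOneModel appears in a bulkWrite operation list (not part of the generated page); the connection string and the 'app'/'users' namespace are illustrative assumptions.

import { MongoClient } from 'mongodb';

const client = new MongoClient('mongodb://localhost:27017'); // assumed connection string
const users = client.db('app').collection('users');          // hypothetical namespace

// InsertOneModel is the value of the `insertOne` key in each bulk operation.
await users.bulkWrite([
  { insertOne: { document: { name: 'Ada' } } },
  { insertOne: { document: { name: 'Grace' } } },
]);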
    diff --git a/docs/Next/interfaces/InsertOneOptions.html b/docs/Next/interfaces/InsertOneOptions.html index 344bc6cf264..c8fb446366c 100644 --- a/docs/Next/interfaces/InsertOneOptions.html +++ b/docs/Next/interfaces/InsertOneOptions.html @@ -1,4 +1,4 @@ -InsertOneOptions | mongodb

    Interface InsertOneOptions

    interface InsertOneOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        forceServerObjectId?: boolean;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +InsertOneOptions | mongodb

    Interface InsertOneOptions

    interface InsertOneOptions {
        authdb?: string;
        bsonRegExp?: boolean;
        bypassDocumentValidation?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        forceServerObjectId?: boolean;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    bypassDocumentValidation?: boolean

    Allow driver to bypass schema validation.

    -
    checkKeys?: boolean

    the serializer will check if keys are valid.

    +
    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    explain?: ExplainVerbosityLike | ExplainCommandOptions

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

    allows specifying which fields we wish to return as unserialized raw buffers.

    +
    fieldsAsRaw?: Document

    allows specifying which fields we wish to return as unserialized raw buffers.

    null

    forceServerObjectId?: boolean

    Force server to assign _id values instead of the driver.

    -
    ignoreUndefined?: boolean

    serialize will not emit undefined fields +

    ignoreUndefined?: boolean

    serialize will not emit undefined fields; note that the driver sets this to false

    true

    -
    maxTimeMS?: number
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -61,12 +63,13 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It works correctly at the Db, Collection, and per-operation level, the same as other BSON options.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
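
    A short sketch of passing a few of these options to Collection.insertOne; the URI and namespace are placeholders, and timeoutMS assumes a driver version that supports the newly documented option.

    import { MongoClient } from "mongodb";

    async function insertWithOptions(): Promise<void> {
        const client = new MongoClient("mongodb://localhost:27017"); // placeholder URI
        const orders = client.db("shop").collection("orders");       // placeholder names

        await orders.insertOne(
            { sku: "abc-123", qty: 2 },
            {
                writeConcern: { w: "majority" }, // wait for a majority acknowledgement
                comment: "nightly-import",       // any BSON type on server 4.4+, string before that
                timeoutMS: 5_000,                // assumes a driver version with this option
            }
        );

        await client.close();
    }
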
    diff --git a/docs/Next/interfaces/InsertOneResult.html b/docs/Next/interfaces/InsertOneResult.html index 106f5245819..484d624650e 100644 --- a/docs/Next/interfaces/InsertOneResult.html +++ b/docs/Next/interfaces/InsertOneResult.html @@ -1,5 +1,5 @@ -InsertOneResult | mongodb

    Interface InsertOneResult<TSchema>

    interface InsertOneResult<TSchema> {
        acknowledged: boolean;
        insertedId: InferIdType<TSchema>;
    }

    Type Parameters

    Properties

    acknowledged +InsertOneResult | mongodb

    Interface InsertOneResult<TSchema>

    interface InsertOneResult<TSchema> {
        acknowledged: boolean;
        insertedId: InferIdType<TSchema>;
    }

    Type Parameters

    Properties

    acknowledged: boolean

    Indicates whether this write result was acknowledged. If not, then all other members of this result will be undefined

    -
    insertedId: InferIdType<TSchema>

    The identifier that was inserted. If the server generated the identifier, this value will be null as the driver does not have access to that data

    -
    +
    insertedId: InferIdType<TSchema>

    The identifier that was inserted. If the server generated the identifier, this value will be null as the driver does not have access to that data

    +
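
    A small sketch of reading an InsertOneResult, guarding on acknowledged before trusting insertedId; the URI and namespace are placeholders.

    import { MongoClient, type InsertOneResult } from "mongodb";

    async function reportInsert(): Promise<void> {
        const client = new MongoClient("mongodb://localhost:27017"); // placeholder URI
        const events = client.db("app").collection("events");        // placeholder names

        const result: InsertOneResult = await events.insertOne({ type: "login", at: new Date() });

        // With an unacknowledged write concern the remaining fields are undefined,
        // so check acknowledged before using insertedId.
        if (result.acknowledged) {
            console.log("inserted _id:", result.insertedId);
        }

        await client.close();
    }
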
    diff --git a/docs/Next/interfaces/KMIPEncryptionKeyOptions.html b/docs/Next/interfaces/KMIPEncryptionKeyOptions.html index 07d0b900ea4..f9de455a5a2 100644 --- a/docs/Next/interfaces/KMIPEncryptionKeyOptions.html +++ b/docs/Next/interfaces/KMIPEncryptionKeyOptions.html @@ -1,10 +1,10 @@ KMIPEncryptionKeyOptions | mongodb

    Interface KMIPEncryptionKeyOptions

    Configuration options for making a KMIP encryption key

    -
    interface KMIPEncryptionKeyOptions {
        delegated?: boolean;
        endpoint?: string;
        keyId?: string;
    }

    Properties

    interface KMIPEncryptionKeyOptions {
        delegated?: boolean;
        endpoint?: string;
        keyId?: string;
    }

    Properties

    delegated?: boolean

    If true, this key should be decrypted by the KMIP server.

    Requires mongodb-client-encryption>=6.0.1.

    -
    endpoint?: string

    Host with optional port.

    -
    keyId?: string

    keyId is the KMIP Unique Identifier to a 96 byte KMIP Secret Data managed object.

    +
    endpoint?: string

    Host with optional port.

    +
    keyId?: string

    keyId is the KMIP Unique Identifier to a 96 byte KMIP Secret Data managed object.

    If keyId is omitted, a random 96 byte KMIP Secret Data managed object will be created.

    -
    +
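
    A hedged sketch of supplying these options as the masterKey when creating a data key through ClientEncryption; the key vault namespace, KMIP endpoint, and TLS configuration are placeholders, and the mongodb-client-encryption package is assumed to be installed.

    import { MongoClient, ClientEncryption } from "mongodb";

    async function createKmipDataKey(): Promise<void> {
        const keyVaultClient = new MongoClient("mongodb://localhost:27017"); // placeholder URI
        const clientEncryption = new ClientEncryption(keyVaultClient, {
            keyVaultNamespace: "encryption.__keyVault",                      // placeholder namespace
            kmsProviders: { kmip: { endpoint: "kmip.example.com:5696" } },   // placeholder endpoint
            // Real KMIP deployments also need tlsOptions for the KMIP connection.
        });

        // Omitting keyId lets the KMIP server create the 96 byte Secret Data
        // managed object; delegated requires mongodb-client-encryption >= 6.0.1.
        const dataKeyId = await clientEncryption.createDataKey("kmip", {
            masterKey: { endpoint: "kmip.example.com:5696", delegated: true },
        });
        console.log("created data key:", dataKeyId);

        await keyVaultClient.close();
    }
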
    diff --git a/docs/Next/interfaces/ListCollectionsOptions.html b/docs/Next/interfaces/ListCollectionsOptions.html index 172f35ffb0e..95e0bb32496 100644 --- a/docs/Next/interfaces/ListCollectionsOptions.html +++ b/docs/Next/interfaces/ListCollectionsOptions.html @@ -1,4 +1,4 @@ -ListCollectionsOptions | mongodb

    Interface ListCollectionsOptions

    interface ListCollectionsOptions {
        authdb?: string;
        authorizedCollections?: boolean;
        batchSize?: number;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        nameOnly?: boolean;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
    }

    Hierarchy

    Properties

    authdb? +ListCollectionsOptions | mongodb

    Interface ListCollectionsOptions

    interface ListCollectionsOptions {
        authdb?: string;
        authorizedCollections?: boolean;
        batchSize?: number;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        nameOnly?: boolean;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
    }

    Hierarchy

    Properties

    authdb?: string
    authorizedCollections?: boolean

    Since 4.0: If true and nameOnly is true, allows a user without the required privilege (i.e. listCollections action on the database) to run the command when access control is enforced.

    -
    batchSize?: number

    The batchSize for the returned command cursor or, if pre 2.8, the system's batch collection

    -
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    authorizedCollections?: boolean

    Since 4.0: If true and nameOnly is true, allows a user without the required privilege (i.e. listCollections action on the database) to run the command when access control is enforced.

    +
    batchSize?: number

    The batchSize for the returned command cursor or, if pre 2.8, the system's batch collection

    +
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    explain?: ExplainVerbosityLike | ExplainCommandOptions

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

    allows specifying which fields we wish to return as unserialized raw buffers.

    +
    fieldsAsRaw?: Document

    allows specifying which fields we wish to return as unserialized raw buffers.

    null

    ignoreUndefined?: boolean

    serialize will not emit undefined fields; note that the driver sets this to false

    true

    -
    maxTimeMS?: number
    nameOnly?: boolean

    Since 4.0: If true, will only return the collection name in the response, and will omit additional info

    -
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    nameOnly?: boolean

    Since 4.0: If true, will only return the collection name in the response, and will omit additional info

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -62,11 +64,12 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It works correctly at the Db, Collection, and per-operation level, the same as other BSON options.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean
    +
    willRetryWrite?: boolean
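
    A brief sketch of passing these options to Db.listCollections; the URI and database name are placeholders.

    import { MongoClient } from "mongodb";

    async function listCollectionNames(): Promise<void> {
        const client = new MongoClient("mongodb://localhost:27017"); // placeholder URI
        const db = client.db("app");                                 // placeholder db name

        // nameOnly keeps the response small; authorizedCollections lets a user
        // without the listCollections privilege list what they can access.
        const collections = await db
            .listCollections({}, { nameOnly: true, authorizedCollections: true })
            .toArray();
        console.log(collections.map(info => info.name));

        await client.close();
    }
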
    diff --git a/docs/Next/interfaces/ListDatabasesOptions.html b/docs/Next/interfaces/ListDatabasesOptions.html index 25c085f9999..15f2a9db18b 100644 --- a/docs/Next/interfaces/ListDatabasesOptions.html +++ b/docs/Next/interfaces/ListDatabasesOptions.html @@ -1,4 +1,4 @@ -ListDatabasesOptions | mongodb

    Interface ListDatabasesOptions

    interface ListDatabasesOptions {
        authdb?: string;
        authorizedDatabases?: boolean;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        filter?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        nameOnly?: boolean;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb? +ListDatabasesOptions | mongodb

    Interface ListDatabasesOptions

    interface ListDatabasesOptions {
        authdb?: string;
        authorizedDatabases?: boolean;
        bsonRegExp?: boolean;
        checkKeys?: boolean;
        collation?: CollationOptions;
        comment?: unknown;
        dbName?: string;
        enableUtf8Validation?: boolean;
        explain?: ExplainVerbosityLike | ExplainCommandOptions;
        fieldsAsRaw?: Document;
        filter?: Document;
        ignoreUndefined?: boolean;
        maxTimeMS?: number;
        nameOnly?: boolean;
        noResponse?: boolean;
        omitReadPreference?: boolean;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readPreference?: ReadPreferenceLike;
        retryWrites?: boolean;
        serializeFunctions?: boolean;
        session?: ClientSession;
        timeoutMS?: number;
        useBigInt64?: boolean;
        willRetryWrite?: boolean;
        writeConcern?: WriteConcern | WriteConcernSettings;
    }

    Hierarchy (view full)

    Properties

    authdb?: string
    authorizedDatabases?: boolean

    A flag that determines which databases are returned based on the user privileges when access control is enabled

    -
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    +

    Properties

    authdb?: string
    authorizedDatabases?: boolean

    A flag that determines which databases are returned based on the user privileges when access control is enabled

    +
    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    checkKeys?: boolean

    the serializer will check if keys are valid.

    false

    collation?: CollationOptions

    Collation

    -
    comment?: unknown

    Comment to apply to the operation.

    +
    comment?: unknown

    Comment to apply to the operation.

    In server versions pre-4.4, 'comment' must be string. A server error will be thrown if any other type is provided.

    In server versions 4.4 and above, 'comment' can be any valid BSON type.

    -
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    +
    dbName?: string
    enableUtf8Validation?: boolean

    Enable utf8 validation when deserializing BSON documents. Defaults to true.

    explain?: ExplainVerbosityLike | ExplainCommandOptions

    Specifies the verbosity mode for the explain output.

    -
    fieldsAsRaw?: Document

    allows specifying which fields we wish to return as unserialized raw buffers.

    +
    fieldsAsRaw?: Document

    allows specifying which fields we wish to return as unserialized raw buffers.

    null

    filter?: Document

    A query predicate that determines which databases are listed

    -
    ignoreUndefined?: boolean

    serialize will not emit undefined fields +

    ignoreUndefined?: boolean

    serialize will not emit undefined fields; note that the driver sets this to false

    true

    -
    maxTimeMS?: number
    nameOnly?: boolean

    A flag to indicate whether the command should return just the database names, or return both database names and size information

    -
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    maxTimeMS?: number

    maxTimeMS is a server-side time limit in milliseconds for processing an operation.

    +
    nameOnly?: boolean

    A flag to indicate whether the command should return just the database names, or return both database names and size information

    +
    noResponse?: boolean
    omitReadPreference?: boolean
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    @@ -63,12 +65,13 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It works correctly at the Db, Collection, and per-operation level, the same as other BSON options.

    readConcern?: ReadConcernLike

    Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported)

    -
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    -
    retryWrites?: boolean

    Should retry failed writes

    -
    serializeFunctions?: boolean

    serialize the javascript functions

    +
    readPreference?: ReadPreferenceLike

    The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest).

    +
    retryWrites?: boolean

    Should retry failed writes

    +
    serializeFunctions?: boolean

    serialize the javascript functions

    false

    session?: ClientSession

    Specify ClientSession for this command

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    -
    willRetryWrite?: boolean

    Write Concern as an object

    -
    +
    willRetryWrite?: boolean

    Write Concern as an object

    +
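
    A brief sketch of passing these options to Admin.listDatabases; the URI is a placeholder.

    import { MongoClient } from "mongodb";

    async function listDatabaseNames(): Promise<void> {
        const client = new MongoClient("mongodb://localhost:27017"); // placeholder URI

        // nameOnly drops the size information; authorizedDatabases restricts the
        // list to databases the current user can actually see.
        const result = await client.db().admin().listDatabases({
            nameOnly: true,
            authorizedDatabases: true,
        });
        console.log(result.databases.map(db => db.name));

        await client.close();
    }
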
    diff --git a/docs/Next/interfaces/ListDatabasesResult.html b/docs/Next/interfaces/ListDatabasesResult.html index e68a7b6478a..c3e23e661b2 100644 --- a/docs/Next/interfaces/ListDatabasesResult.html +++ b/docs/Next/interfaces/ListDatabasesResult.html @@ -1,5 +1,5 @@ -ListDatabasesResult | mongodb

    Interface ListDatabasesResult

    interface ListDatabasesResult {
        databases: ({
            empty?: boolean;
            name: string;
            sizeOnDisk?: number;
        } & Document)[];
        ok: 0 | 1;
        totalSize?: number;
        totalSizeMb?: number;
    }

    Properties

    databases +ListDatabasesResult | mongodb

    Interface ListDatabasesResult

    interface ListDatabasesResult {
        databases: ({
            empty?: boolean;
            name: string;
            sizeOnDisk?: number;
        } & Document)[];
        ok: 0 | 1;
        totalSize?: number;
        totalSizeMb?: number;
    }

    Properties

    databases: ({
        empty?: boolean;
        name: string;
        sizeOnDisk?: number;
    } & Document)[]
    ok: 0 | 1
    totalSize?: number
    totalSizeMb?: number
    +

    Properties

    databases: ({
        empty?: boolean;
        name: string;
        sizeOnDisk?: number;
    } & Document)[]
    ok: 0 | 1
    totalSize?: number
    totalSizeMb?: number
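
    A small sketch of reading the fields of a ListDatabasesResult; the URI is a placeholder.

    import { MongoClient, type ListDatabasesResult } from "mongodb";

    async function reportDiskUsage(): Promise<void> {
        const client = new MongoClient("mongodb://localhost:27017"); // placeholder URI
        const result: ListDatabasesResult = await client.db().admin().listDatabases();

        for (const entry of result.databases) {
            // sizeOnDisk and empty are omitted when nameOnly was requested.
            console.log(entry.name, entry.sizeOnDisk ?? "size unknown");
        }
        console.log("ok:", result.ok, "total size (bytes):", result.totalSize);

        await client.close();
    }
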
    diff --git a/docs/Next/interfaces/MongoClientOptions.html b/docs/Next/interfaces/MongoClientOptions.html index f42700bcad8..cfa46c9634f 100644 --- a/docs/Next/interfaces/MongoClientOptions.html +++ b/docs/Next/interfaces/MongoClientOptions.html @@ -1,6 +1,6 @@ MongoClientOptions | mongodb

    Interface MongoClientOptions

    Describes all possible URI query options for the mongo client

    interface MongoClientOptions {
        allowPartialTrustChain?: boolean;
        ALPNProtocols?: Uint8Array | string[] | Uint8Array[];
        appName?: string;
        auth?: Auth;
        authMechanism?: AuthMechanism;
        authMechanismProperties?: AuthMechanismProperties;
        authSource?: string;
        autoEncryption?: AutoEncryptionOptions;
        autoSelectFamily?: boolean;
        autoSelectFamilyAttemptTimeout?: number;
        bsonRegExp?: boolean;
        ca?: string | Buffer | (string | Buffer)[];
        cert?: string | Buffer | (string | Buffer)[];
        checkKeys?: boolean;
        checkServerIdentity?: ((hostname: string, cert: PeerCertificate) => Error | undefined);
        ciphers?: string;
        compressors?: string | (
            | "none"
            | "snappy"
            | "zlib"
            | "zstd")[];
        connectTimeoutMS?: number;
        crl?: string | Buffer | (string | Buffer)[];
        directConnection?: boolean;
        driverInfo?: DriverInfo;
        ecdhCurve?: string;
        enableUtf8Validation?: boolean;
        family?: number;
        fieldsAsRaw?: Document;
        forceServerObjectId?: boolean;
        heartbeatFrequencyMS?: number;
        hints?: number;
        ignoreUndefined?: boolean;
        journal?: boolean;
        key?: string | Buffer | (string | Buffer | KeyObject)[];
        loadBalanced?: boolean;
        localAddress?: string;
        localPort?: number;
        localThresholdMS?: number;
        lookup?: LookupFunction;
        maxConnecting?: number;
        maxIdleTimeMS?: number;
        maxPoolSize?: number;
        maxStalenessSeconds?: number;
        minDHSize?: number;
        minHeartbeatFrequencyMS?: number;
        minPoolSize?: number;
        monitorCommands?: boolean;
        noDelay?: boolean;
        passphrase?: string;
        pfx?: string | Buffer | (string | Buffer | PxfObject)[];
        pkFactory?: PkFactory;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        proxyHost?: string;
        proxyPassword?: string;
        proxyPort?: number;
        proxyUsername?: string;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readConcernLevel?: ReadConcernLevel;
        readPreference?: ReadPreference | ReadPreferenceMode;
        readPreferenceTags?: TagSet[];
        rejectUnauthorized?: boolean;
        replicaSet?: string;
        retryReads?: boolean;
        retryWrites?: boolean;
        secureContext?: SecureContext;
        secureProtocol?: string;
        serializeFunctions?: boolean;
        serverApi?: "1" | ServerApi;
        serverMonitoringMode?: ServerMonitoringMode;
        servername?: string;
        serverSelectionTimeoutMS?: number;
        session?: Buffer;
        socketTimeoutMS?: number;
        srvMaxHosts?: number;
        srvServiceName?: string;
        ssl?: boolean;
        tls?: boolean;
        tlsAllowInvalidCertificates?: boolean;
        tlsAllowInvalidHostnames?: boolean;
        tlsCAFile?: string;
        tlsCertificateKeyFile?: string;
        tlsCertificateKeyFilePassword?: string;
        tlsCRLFile?: string;
        tlsInsecure?: boolean;
        useBigInt64?: boolean;
        w?: W;
        waitQueueTimeoutMS?: number;
        writeConcern?: WriteConcern | WriteConcernSettings;
        wtimeoutMS?: number;
        zlibCompressionLevel?:
            | 0
            | 5
            | 1
            | 3
            | 9
            | 4
            | 2
            | 7
            | 6
            | 8;
    }

    Hierarchy (view full)

    Properties

    interface MongoClientOptions {
        allowPartialTrustChain?: boolean;
        ALPNProtocols?: Uint8Array | string[] | Uint8Array[];
        appName?: string;
        auth?: Auth;
        authMechanism?: AuthMechanism;
        authMechanismProperties?: AuthMechanismProperties;
        authSource?: string;
        autoEncryption?: AutoEncryptionOptions;
        autoSelectFamily?: boolean;
        autoSelectFamilyAttemptTimeout?: number;
        bsonRegExp?: boolean;
        ca?: string | Buffer | (string | Buffer)[];
        cert?: string | Buffer | (string | Buffer)[];
        checkKeys?: boolean;
        checkServerIdentity?: ((hostname: string, cert: PeerCertificate) => Error | undefined);
        ciphers?: string;
        compressors?: string | (
            | "none"
            | "snappy"
            | "zlib"
            | "zstd")[];
        connectTimeoutMS?: number;
        crl?: string | Buffer | (string | Buffer)[];
        directConnection?: boolean;
        driverInfo?: DriverInfo;
        ecdhCurve?: string;
        enableUtf8Validation?: boolean;
        family?: number;
        fieldsAsRaw?: Document;
        forceServerObjectId?: boolean;
        heartbeatFrequencyMS?: number;
        hints?: number;
        ignoreUndefined?: boolean;
        journal?: boolean;
        key?: string | Buffer | (string | Buffer | KeyObject)[];
        loadBalanced?: boolean;
        localAddress?: string;
        localPort?: number;
        localThresholdMS?: number;
        lookup?: LookupFunction;
        maxConnecting?: number;
        maxIdleTimeMS?: number;
        maxPoolSize?: number;
        maxStalenessSeconds?: number;
        minDHSize?: number;
        minHeartbeatFrequencyMS?: number;
        minPoolSize?: number;
        monitorCommands?: boolean;
        noDelay?: boolean;
        passphrase?: string;
        pfx?: string | Buffer | (string | Buffer | PxfObject)[];
        pkFactory?: PkFactory;
        promoteBuffers?: boolean;
        promoteLongs?: boolean;
        promoteValues?: boolean;
        proxyHost?: string;
        proxyPassword?: string;
        proxyPort?: number;
        proxyUsername?: string;
        raw?: boolean;
        readConcern?: ReadConcernLike;
        readConcernLevel?: ReadConcernLevel;
        readPreference?: ReadPreference | ReadPreferenceMode;
        readPreferenceTags?: TagSet[];
        rejectUnauthorized?: boolean;
        replicaSet?: string;
        retryReads?: boolean;
        retryWrites?: boolean;
        secureContext?: SecureContext;
        secureProtocol?: string;
        serializeFunctions?: boolean;
        serverApi?: "1" | ServerApi;
        serverMonitoringMode?: ServerMonitoringMode;
        servername?: string;
        serverSelectionTimeoutMS?: number;
        session?: Buffer;
        socketTimeoutMS?: number;
        srvMaxHosts?: number;
        srvServiceName?: string;
        ssl?: boolean;
        timeoutMS?: number;
        tls?: boolean;
        tlsAllowInvalidCertificates?: boolean;
        tlsAllowInvalidHostnames?: boolean;
        tlsCAFile?: string;
        tlsCertificateKeyFile?: string;
        tlsCertificateKeyFilePassword?: string;
        tlsCRLFile?: string;
        tlsInsecure?: boolean;
        useBigInt64?: boolean;
        w?: W;
        waitQueueTimeoutMS?: number;
        writeConcern?: WriteConcern | WriteConcernSettings;
        wtimeoutMS?: number;
        zlibCompressionLevel?:
            | 0
            | 5
            | 1
            | 3
            | 9
            | 4
            | 2
            | 7
            | 6
            | 8;
    }

    Hierarchy (view full)

    Properties

    allowPartialTrustChain?: boolean
    ALPNProtocols?: Uint8Array | string[] | Uint8Array[]

    An array of strings or a Buffer naming possible ALPN protocols. (Protocols should be ordered by their priority.)

    appName?: string

    The name of the application that created this MongoClient instance. MongoDB 3.4 and newer will print this value in the server log upon establishing each connection. It is also recorded in the slow query log and profile collections

    -
    auth?: Auth

    The auth settings for when connecting to the server.

    -
    authMechanism?: AuthMechanism

    Specify the authentication mechanism that MongoDB will use to authenticate the connection.

    -
    authMechanismProperties?: AuthMechanismProperties

    Specify properties for the specified authMechanism as a comma-separated list of colon-separated key-value pairs.

    -
    authSource?: string

    Specify the database name associated with the user’s credentials.

    -
    autoEncryption?: AutoEncryptionOptions

    Optionally enable in-use auto encryption

    +
    auth?: Auth

    The auth settings for when connecting to the server.

    +
    authMechanism?: AuthMechanism

    Specify the authentication mechanism that MongoDB will use to authenticate the connection.

    +
    authMechanismProperties?: AuthMechanismProperties

    Specify properties for the specified authMechanism as a comma-separated list of colon-separated key-value pairs.

    +
    authSource?: string

    Specify the database name associated with the user’s credentials.

    +
    autoEncryption?: AutoEncryptionOptions

    Optionally enable in-use auto encryption

    Automatic encryption is an enterprise only feature that only applies to operations on a collection. Automatic encryption is not supported for operations on a database or view, and operations that are not bypassed will result in error (see libmongocrypt: Auto Encryption Allow-List). To bypass automatic encryption for all operations, set bypassAutoEncryption=true in AutoEncryptionOpts.

    Automatic encryption requires the authenticated user to have the listCollections privilege action.

    @@ -107,7 +108,7 @@
  • AutoEncryptionOptions.bypassAutomaticEncryption is false.
  • If an internal MongoClient is created, it is configured with the same options as the parent MongoClient except minPoolSize is set to 0 and AutoEncryptionOptions is omitted.
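
    A hedged sketch of turning on the autoEncryption option described above when constructing the client; the URI, key vault namespace, and locally generated master key are placeholders, and the mongodb-client-encryption package is assumed to be installed.

    import { MongoClient } from "mongodb";
    import { randomBytes } from "node:crypto";

    const encryptedClient = new MongoClient("mongodb://localhost:27017", { // placeholder URI
        autoEncryption: {
            keyVaultNamespace: "encryption.__keyVault",        // placeholder namespace
            kmsProviders: { local: { key: randomBytes(96) } }, // placeholder local master key
            // bypassAutoEncryption: true would skip automatic encryption while
            // keeping automatic decryption, as described above.
        },
    });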

    -
    autoSelectFamily?: boolean

    v18.13.0

    +
    autoSelectFamily?: boolean

    v18.13.0

    autoSelectFamilyAttemptTimeout?: number

    v18.13.0

    bsonRegExp?: boolean

    return BSON regular expressions as BSONRegExp instances.

    false

    @@ -146,11 +147,11 @@ ciphers can be obtained via tls.getCiphers(). Cipher names must be uppercased in order for OpenSSL to accept them.

    compressors?: string | (
        | "none"
        | "snappy"
        | "zlib"
        | "zstd")[]

    An array or comma-delimited string of compressors to enable network compression for communication between this client and a mongod/mongos instance.

    -
    connectTimeoutMS?: number

    The time in milliseconds to attempt a connection before timing out.

    -
    crl?: string | Buffer | (string | Buffer)[]

    PEM formatted CRLs (Certificate Revocation Lists).

    +
    connectTimeoutMS?: number

    The time in milliseconds to attempt a connection before timing out.

    +
    crl?: string | Buffer | (string | Buffer)[]

    PEM formatted CRLs (Certificate Revocation Lists).

    directConnection?: boolean

    Allow a driver to force a Single topology type with a connection string containing one host

    -
    driverInfo?: DriverInfo

    Allows a wrapping driver to amend the client metadata generated by the driver to include information about the wrapping driver

    -
    ecdhCurve?: string

    A string describing a named curve or a colon separated list of curve +

    driverInfo?: DriverInfo

    Allows a wrapping driver to amend the client metadata generated by the driver to include information about the wrapping driver

    +
    ecdhCurve?: string

    A string describing a named curve or a colon separated list of curve NIDs or names, for example P-521:P-384:P-256, to use for ECDH key agreement. Set to auto to select the curve automatically. Use crypto.getCurves() to obtain a list of available curve names. On @@ -161,13 +162,13 @@

    family?: number
    fieldsAsRaw?: Document

    allows specifying which fields we wish to return as unserialized raw buffers.

    null

    forceServerObjectId?: boolean

    Force server to assign _id values instead of the driver

    -
    heartbeatFrequencyMS?: number

    heartbeatFrequencyMS controls when the driver checks the state of the MongoDB deployment. Specify the interval (in milliseconds) between checks, counted from the end of the previous check until the beginning of the next one.

    -
    hints?: number
    ignoreUndefined?: boolean

    serialize will not emit undefined fields +

    heartbeatFrequencyMS?: number

    heartbeatFrequencyMS controls when the driver checks the state of the MongoDB deployment. Specify the interval (in milliseconds) between checks, counted from the end of the previous check until the beginning of the next one.

    +
    hints?: number
    ignoreUndefined?: boolean

    serialize will not emit undefined fields; note that the driver sets this to false

    true

    journal?: boolean

    The journal write concern

    Please use the writeConcern option instead

    -
    key?: string | Buffer | (string | Buffer | KeyObject)[]

    Private keys in PEM format. PEM allows the option of private keys +

    key?: string | Buffer | (string | Buffer | KeyObject)[]

    Private keys in PEM format. PEM allows the option of private keys being encrypted. Encrypted keys will be decrypted with options.passphrase. Multiple keys using different algorithms can be provided either as an array of unencrypted key strings or buffers, @@ -176,16 +177,16 @@ object.passphrase is optional. Encrypted keys will be decrypted with object.passphrase if provided, or options.passphrase if it is not.

    loadBalanced?: boolean

    Instruct the driver it is connecting to a load balancer fronting a mongos-like service

    -
    localAddress?: string
    localPort?: number
    localThresholdMS?: number

    The size (in milliseconds) of the latency window for selecting among multiple suitable MongoDB instances.

    -
    lookup?: LookupFunction
    maxConnecting?: number

    The maximum number of connections that may be in the process of being established concurrently by the connection pool.

    -
    maxIdleTimeMS?: number

    The maximum number of milliseconds that a connection can remain idle in the pool before being removed and closed.

    -
    maxPoolSize?: number

    The maximum number of connections in the connection pool.

    -
    maxStalenessSeconds?: number

    Specifies, in seconds, how stale a secondary can be before the client stops using it for read operations.

    -
    minDHSize?: number
    minHeartbeatFrequencyMS?: number

    Sets the minimum heartbeat frequency. In the event that the driver has to frequently re-check a server's availability, it will wait at least this long since the previous check to avoid wasted effort.

    -
    minPoolSize?: number

    The minimum number of connections in the connection pool.

    -
    monitorCommands?: boolean

    Enable command monitoring for this client

    -
    noDelay?: boolean

    TCP Connection no delay

    -
    passphrase?: string

    Shared passphrase used for a single private key and/or a PFX.

    +
    localAddress?: string
    localPort?: number
    localThresholdMS?: number

    The size (in milliseconds) of the latency window for selecting among multiple suitable MongoDB instances.

    +
    lookup?: LookupFunction
    maxConnecting?: number

    The maximum number of connections that may be in the process of being established concurrently by the connection pool.

    +
    maxIdleTimeMS?: number

    The maximum number of milliseconds that a connection can remain idle in the pool before being removed and closed.

    +
    maxPoolSize?: number

    The maximum number of connections in the connection pool.

    +
    maxStalenessSeconds?: number

    Specifies, in seconds, how stale a secondary can be before the client stops using it for read operations.

    +
    minDHSize?: number
    minHeartbeatFrequencyMS?: number

    Sets the minimum heartbeat frequency. In the event that the driver has to frequently re-check a server's availability, it will wait at least this long since the previous check to avoid wasted effort.

    +
    minPoolSize?: number

    The minimum number of connections in the connection pool.

    +
    monitorCommands?: boolean

    Enable command monitoring for this client

    +
    noDelay?: boolean

    TCP Connection no delay

    +
    passphrase?: string

    Shared passphrase used for a single private key and/or a PFX.

    pfx?: string | Buffer | (string | Buffer | PxfObject)[]

    PFX or PKCS12 encoded private key and certificate chain. pfx is an alternative to providing key and cert individually. PFX is usually encrypted, if it is, passphrase will be used to decrypt it. Multiple @@ -195,17 +196,17 @@ object.passphrase is optional. Encrypted PFX will be decrypted with object.passphrase if provided, or options.passphrase if it is not.

    pkFactory?: PkFactory

    A primary key factory function for generation of custom _id keys

    -
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    +
    promoteBuffers?: boolean

    when deserializing a Binary will return it as a node.js Buffer instance.

    false

    promoteLongs?: boolean

    when deserializing a Long will fit it into a Number if it's smaller than 53 bits.

    true

    promoteValues?: boolean

    when deserializing will promote BSON values to their closest Node.js equivalent types.

    true

    proxyHost?: string

    Configures a Socks5 proxy host used for creating TCP connections.

    -
    proxyPassword?: string

    Configures a Socks5 proxy password when the proxy in proxyHost requires username/password authentication.

    -
    proxyPort?: number

    Configures a Socks5 proxy port used for creating TCP connections.

    -
    proxyUsername?: string

    Configures a Socks5 proxy username when the proxy in proxyHost requires username/password authentication.

    -
    raw?: boolean

    Enabling the raw option will return a Node.js Buffer +

    proxyPassword?: string

    Configures a Socks5 proxy password when the proxy in proxyHost requires username/password authentication.

    +
    proxyPort?: number

    Configures a Socks5 proxy port used for creating TCP connections.

    +
    proxyUsername?: string

    Configures a Socks5 proxy username when the proxy in proxyHost requires username/password authentication.

    +
    raw?: boolean

    Enabling the raw option will return a Node.js Buffer which is allocated using allocUnsafe API. See this section from the Node.js Docs here for more detail about what "unsafe" refers to in this context. @@ -217,10 +218,10 @@

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It works correctly at the Db, Collection, and per-operation level, the same as other BSON options.

    readConcern?: ReadConcernLike

    Specify a read concern for the collection (only MongoDB 3.2 or higher supported)

    -
    readConcernLevel?: ReadConcernLevel

    The level of isolation

    -

    readPreference?: ReadPreference | ReadPreferenceMode

    Specifies the read preferences for this connection

    -
    readPreferenceTags?: TagSet[]

    Specifies the tags document as a comma-separated list of colon-separated key-value pairs.

    -
    rejectUnauthorized?: boolean

    If true the server will reject any connection which is not +

    readConcernLevel?: ReadConcernLevel

    The level of isolation

    +

    readPreference?: ReadPreference | ReadPreferenceMode

    Specifies the read preferences for this connection

    +
    readPreferenceTags?: TagSet[]

    Specifies the tags document as a comma-separated list of colon-separated key-value pairs.

    +
    rejectUnauthorized?: boolean

    If true the server will reject any connection which is not authorized with the list of supplied CAs. This option only has an effect if requestCert is true.

    replicaSet?: string

    Specifies the name of the replica set, if the mongod is a member of a replica set.

    retryReads?: boolean

    Enables retryable reads.

    -
    retryWrites?: boolean

    Enable retryable writes.

    -
    secureContext?: SecureContext

    An optional TLS context object from tls.createSecureContext()

    +
    retryWrites?: boolean

    Enable retryable writes.

    +
    secureContext?: SecureContext

    An optional TLS context object from tls.createSecureContext()

    secureProtocol?: string

    Legacy mechanism to select the TLS protocol version to use, it does not support independent control of the minimum and maximum version, and does not support limiting the protocol to TLSv1.3. Use @@ -242,32 +243,33 @@

    serializeFunctions?: boolean

    serialize the javascript functions

    false

    serverApi?: "1" | ServerApi

    Server API version

    -
    serverMonitoringMode?: ServerMonitoringMode

    Instructs the driver monitors to use a specific monitoring mode

    -
    servername?: string
    serverSelectionTimeoutMS?: number

    Specifies how long (in milliseconds) to block for server selection before throwing an exception.

    -
    session?: Buffer

    An optional Buffer instance containing a TLS session.

    +
    serverMonitoringMode?: ServerMonitoringMode

    Instructs the driver monitors to use a specific monitoring mode

    +
    servername?: string
    serverSelectionTimeoutMS?: number

    Specifies how long (in milliseconds) to block for server selection before throwing an exception.

    +
    session?: Buffer

    An optional Buffer instance containing a TLS session.

    socketTimeoutMS?: number

    The time in milliseconds to attempt a send or receive on a socket before the attempt times out.

    -
    srvMaxHosts?: number

    The maximum number of hosts to connect to when using an srv connection string; a setting of 0 means unlimited hosts

    -
    srvServiceName?: string

    Modifies the srv URI to look like:

    +
    srvMaxHosts?: number

    The maximum number of hosts to connect to when using an srv connection string; a setting of 0 means unlimited hosts

    +
    srvServiceName?: string

    Modifies the srv URI to look like:

    _{srvServiceName}._tcp.{hostname}.{domainname}

    Querying this DNS URI is expected to respond with SRV records

    -
    ssl?: boolean

    A boolean to enable or disable TLS/SSL for the connection. (The ssl option is equivalent to the tls option.)

    -
    tls?: boolean

    Enables or disables TLS/SSL for the connection.

    -
    tlsAllowInvalidCertificates?: boolean

    Bypasses validation of the certificates presented by the mongod/mongos instance

    -
    tlsAllowInvalidHostnames?: boolean

    Disables hostname validation of the certificate presented by the mongod/mongos instance.

    -
    tlsCAFile?: string

    Specifies the location of a local .pem file that contains the root certificate chain from the Certificate Authority. This file is used to validate the certificate presented by the mongod/mongos instance.

    -
    tlsCertificateKeyFile?: string

    Specifies the location of a local .pem file that contains the client's TLS/SSL certificate and key.

    -
    tlsCertificateKeyFilePassword?: string

    Specifies the password to decrypt the tlsCertificateKeyFile.

    -
    tlsCRLFile?: string

    Specifies the location of a local CRL .pem file that contains the client revocation list.

    -
    tlsInsecure?: boolean

    Disables various certificate validations.

    -
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    +
    ssl?: boolean

    A boolean to enable or disable TLS/SSL for the connection. (The ssl option is equivalent to the tls option.)

    +
    timeoutMS?: number

    Specifies the time an operation will run until it throws a timeout error

    +
    tls?: boolean

    Enables or disables TLS/SSL for the connection.

    +
    tlsAllowInvalidCertificates?: boolean

    Bypasses validation of the certificates presented by the mongod/mongos instance

    +
    tlsAllowInvalidHostnames?: boolean

    Disables hostname validation of the certificate presented by the mongod/mongos instance.

    +
    tlsCAFile?: string

    Specifies the location of a local .pem file that contains the root certificate chain from the Certificate Authority. This file is used to validate the certificate presented by the mongod/mongos instance.

    +
    tlsCertificateKeyFile?: string

    Specifies the location of a local .pem file that contains the client's TLS/SSL certificate and key.

    +
    tlsCertificateKeyFilePassword?: string

    Specifies the password to decrypt the tlsCertificateKeyFile.

    +
    tlsCRLFile?: string

    Specifies the location of a local CRL .pem file that contains the client revocation list.

    +
    tlsInsecure?: boolean

    Disables various certificate validations.

    +
    useBigInt64?: boolean

    when deserializing a Long return as a BigInt.

    false

    w?: W

    The write concern w value

    Please use the writeConcern option instead

    -
    waitQueueTimeoutMS?: number

    The maximum time in milliseconds that a thread can wait for a connection to become available.

    -

    A MongoDB WriteConcern, which describes the level of acknowledgement +

    waitQueueTimeoutMS?: number

    The maximum time in milliseconds that a thread can wait for a connection to become available.

    +

    writeConcern?: WriteConcern | WriteConcernSettings

    A MongoDB WriteConcern, which describes the level of acknowledgement requested from MongoDB for write operations.

    wtimeoutMS?: number

    The write concern timeout

    +
    wtimeoutMS?: number

    The write concern timeout

    Please use the writeConcern option instead

    -
    zlibCompressionLevel?:
        | 0
        | 5
        | 1
        | 3
        | 9
        | 4
        | 2
        | 7
        | 6
        | 8

    An integer that specifies the compression level if using zlib for network compression.

    -
    +
    zlibCompressionLevel?:
        | 0
        | 5
        | 1
        | 3
        | 9
        | 4
        | 2
        | 7
        | 6
        | 8

    An integer that specifies the compression level if using zlib for network compression.

    +
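
    A sketch of combining URI query options with a MongoClientOptions object at construction time; the hosts, replica set name, and option values below are placeholders.

    import { MongoClient } from "mongodb";

    const client = new MongoClient(
        "mongodb://db1.example.com:27017,db2.example.com:27017/?replicaSet=rs0", // placeholder hosts
        {
            appName: "inventory-service",          // shows up in server logs and profiler output
            maxPoolSize: 20,
            minPoolSize: 2,
            readPreference: "secondaryPreferred",
            compressors: ["zstd", "zlib"],         // zstd needs the optional zstd package installed
            retryWrites: true,
            serverSelectionTimeoutMS: 10_000,
        }
    );

    async function ping(): Promise<void> {
        // Connections are established lazily; an explicit command forces one.
        await client.db("admin").command({ ping: 1 });
    }
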
    diff --git a/docs/Next/interfaces/MongoNetworkErrorOptions.html b/docs/Next/interfaces/MongoNetworkErrorOptions.html index e774ca8a09e..7c9532b3d9f 100644 --- a/docs/Next/interfaces/MongoNetworkErrorOptions.html +++ b/docs/Next/interfaces/MongoNetworkErrorOptions.html @@ -1,4 +1,4 @@ -MongoNetworkErrorOptions | mongodb

    Interface MongoNetworkErrorOptions

    interface MongoNetworkErrorOptions {
        beforeHandshake?: boolean;
        cause?: Error;
    }

    Properties

    beforeHandshake? +MongoNetworkErrorOptions | mongodb

    Interface MongoNetworkErrorOptions

    interface MongoNetworkErrorOptions {
        beforeHandshake?: boolean;
        cause?: Error;
    }

    Properties

    beforeHandshake?: boolean

    Indicates the timeout happened before a connection handshake completed

    -
    cause?: Error
    +
    cause?: Error
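
    A small sketch of handling errors constructed with these options; it relies only on the exported MongoNetworkError class and the standard cause property, and the URI and timeouts are placeholders.

    import { MongoClient, MongoNetworkError } from "mongodb";

    async function pingWithDiagnostics(): Promise<void> {
        const client = new MongoClient("mongodb://localhost:27017", { // placeholder URI
            connectTimeoutMS: 2_000,                                  // placeholder timeouts
            serverSelectionTimeoutMS: 2_000,
        });

        try {
            await client.db("admin").command({ ping: 1 });
        } catch (error) {
            // MongoNetworkTimeoutError extends MongoNetworkError, so this also
            // catches timeouts; cause carries the underlying socket error if set.
            if (error instanceof MongoNetworkError) {
                console.error("network failure:", error.message, error.cause);
            }
            throw error;
        } finally {
            await client.close();
        }
    }
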
    diff --git a/docs/Next/interfaces/MongoOptions.html b/docs/Next/interfaces/MongoOptions.html index d600facd5e6..e37d6ae90f1 100644 --- a/docs/Next/interfaces/MongoOptions.html +++ b/docs/Next/interfaces/MongoOptions.html @@ -11,7 +11,7 @@
  • DNS SRV records and TXT records
  • Not all options may be present after client construction as some are obtained from asynchronous operations.

    -
    interface MongoOptions {
        allowPartialTrustChain?: boolean;
        ALPNProtocols?: Uint8Array | string[] | Uint8Array[];
        appName?: string;
        autoEncryption: AutoEncryptionOptions;
        autoSelectFamily?: boolean;
        autoSelectFamilyAttemptTimeout?: number;
        ca?: string | Buffer | (string | Buffer)[];
        cert?: string | Buffer | (string | Buffer)[];
        checkServerIdentity?: ((hostname: string, cert: PeerCertificate) => Error | undefined);
        ciphers?: string;
        compressors: (
            | "none"
            | "snappy"
            | "zlib"
            | "zstd")[];
        connectTimeoutMS: number;
        credentials?: MongoCredentials;
        crl?: string | Buffer | (string | Buffer)[];
        dbName: string;
        directConnection: boolean;
        driverInfo: DriverInfo;
        ecdhCurve?: string;
        family?: number;
        forceServerObjectId: boolean;
        heartbeatFrequencyMS: number;
        hints?: number;
        hosts: HostAddress[];
        key?: string | Buffer | (string | Buffer | KeyObject)[];
        loadBalanced: boolean;
        localAddress?: string;
        localPort?: number;
        localThresholdMS: number;
        lookup?: LookupFunction;
        maxConnecting: number;
        maxIdleTimeMS: number;
        maxPoolSize: number;
        metadata: ClientMetadata;
        minDHSize?: number;
        minHeartbeatFrequencyMS: number;
        minPoolSize: number;
        monitorCommands: boolean;
        noDelay: boolean;
        passphrase?: string;
        pfx?: string | Buffer | (string | Buffer | PxfObject)[];
        pkFactory: PkFactory;
        proxyHost?: string;
        proxyPassword?: string;
        proxyPort?: number;
        proxyUsername?: string;
        raw: boolean;
        readConcern: ReadConcern;
        readPreference: ReadPreference;
        rejectUnauthorized?: boolean;
        replicaSet: string;
        retryReads: boolean;
        retryWrites: boolean;
        secureContext?: SecureContext;
        secureProtocol?: string;
        serverApi: ServerApi;
        serverMonitoringMode: ServerMonitoringMode;
        servername?: string;
        serverSelectionTimeoutMS: number;
        session?: Buffer;
        socketTimeoutMS: number;
        srvHost?: string;
        srvMaxHosts: number;
        srvServiceName: string;
        tls: boolean;
        tlsAllowInvalidCertificates: boolean;
        tlsAllowInvalidHostnames: boolean;
        tlsCAFile?: string;
        tlsCertificateKeyFile?: string;
        tlsCRLFile?: string;
        tlsInsecure: boolean;
        waitQueueTimeoutMS: number;
        writeConcern: WriteConcern;
        zlibCompressionLevel:
            | 0
            | 1
            | 2
            | 3
            | 4
            | 5
            | 6
            | 7
            | 8
            | 9;
    }

    Hierarchy (view full)

    • Required<Pick<MongoClientOptions,
          | "autoEncryption"
          | "connectTimeoutMS"
          | "directConnection"
          | "driverInfo"
          | "forceServerObjectId"
          | "minHeartbeatFrequencyMS"
          | "heartbeatFrequencyMS"
          | "localThresholdMS"
          | "maxConnecting"
          | "maxIdleTimeMS"
          | "maxPoolSize"
          | "minPoolSize"
          | "monitorCommands"
          | "noDelay"
          | "pkFactory"
          | "raw"
          | "replicaSet"
          | "retryReads"
          | "retryWrites"
          | "serverSelectionTimeoutMS"
          | "socketTimeoutMS"
          | "srvMaxHosts"
          | "srvServiceName"
          | "tlsAllowInvalidCertificates"
          | "tlsAllowInvalidHostnames"
          | "tlsInsecure"
          | "waitQueueTimeoutMS"
          | "zlibCompressionLevel">>
    • SupportedNodeConnectionOptions
      • MongoOptions

    Properties

    interface MongoOptions {
        allowPartialTrustChain?: boolean;
        ALPNProtocols?: Uint8Array | string[] | Uint8Array[];
        appName?: string;
        autoEncryption: AutoEncryptionOptions;
        autoSelectFamily?: boolean;
        autoSelectFamilyAttemptTimeout?: number;
        ca?: string | Buffer | (string | Buffer)[];
        cert?: string | Buffer | (string | Buffer)[];
        checkServerIdentity?: ((hostname: string, cert: PeerCertificate) => Error | undefined);
        ciphers?: string;
        compressors: (
            | "none"
            | "snappy"
            | "zlib"
            | "zstd")[];
        connectTimeoutMS: number;
        credentials?: MongoCredentials;
        crl?: string | Buffer | (string | Buffer)[];
        dbName: string;
        directConnection: boolean;
        driverInfo: DriverInfo;
        ecdhCurve?: string;
        family?: number;
        forceServerObjectId: boolean;
        heartbeatFrequencyMS: number;
        hints?: number;
        hosts: HostAddress[];
        key?: string | Buffer | (string | Buffer | KeyObject)[];
        loadBalanced: boolean;
        localAddress?: string;
        localPort?: number;
        localThresholdMS: number;
        lookup?: LookupFunction;
        maxConnecting: number;
        maxIdleTimeMS: number;
        maxPoolSize: number;
        metadata: ClientMetadata;
        minDHSize?: number;
        minHeartbeatFrequencyMS: number;
        minPoolSize: number;
        monitorCommands: boolean;
        noDelay: boolean;
        passphrase?: string;
        pfx?: string | Buffer | (string | Buffer | PxfObject)[];
        pkFactory: PkFactory;
        proxyHost?: string;
        proxyPassword?: string;
        proxyPort?: number;
        proxyUsername?: string;
        raw: boolean;
        readConcern: ReadConcern;
        readPreference: ReadPreference;
        rejectUnauthorized?: boolean;
        replicaSet: string;
        retryReads: boolean;
        retryWrites: boolean;
        secureContext?: SecureContext;
        secureProtocol?: string;
        serverApi: ServerApi;
        serverMonitoringMode: ServerMonitoringMode;
        servername?: string;
        serverSelectionTimeoutMS: number;
        session?: Buffer;
        socketTimeoutMS: number;
        srvHost?: string;
        srvMaxHosts: number;
        srvServiceName: string;
        timeoutMS?: number;
        tls: boolean;
        tlsAllowInvalidCertificates: boolean;
        tlsAllowInvalidHostnames: boolean;
        tlsCAFile?: string;
        tlsCertificateKeyFile?: string;
        tlsCRLFile?: string;
        tlsInsecure: boolean;
        waitQueueTimeoutMS: number;
        writeConcern: WriteConcern;
        zlibCompressionLevel:
            | 0
            | 1
            | 2
            | 3
            | 4
            | 5
            | 6
            | 7
            | 8
            | 9;
    }

    Hierarchy (view full)

    • Required<Pick<MongoClientOptions,
          | "autoEncryption"
          | "connectTimeoutMS"
          | "directConnection"
          | "driverInfo"
          | "forceServerObjectId"
          | "minHeartbeatFrequencyMS"
          | "heartbeatFrequencyMS"
          | "localThresholdMS"
          | "maxConnecting"
          | "maxIdleTimeMS"
          | "maxPoolSize"
          | "minPoolSize"
          | "monitorCommands"
          | "noDelay"
          | "pkFactory"
          | "raw"
          | "replicaSet"
          | "retryReads"
          | "retryWrites"
          | "serverSelectionTimeoutMS"
          | "socketTimeoutMS"
          | "srvMaxHosts"
          | "srvServiceName"
          | "tlsAllowInvalidCertificates"
          | "tlsAllowInvalidHostnames"
          | "tlsInsecure"
          | "waitQueueTimeoutMS"
          | "zlibCompressionLevel">>
    • SupportedNodeConnectionOptions
      • MongoOptions
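
    As a rough sketch of how these types relate: MongoOptions is the parsed, fully resolved form of the MongoClientOptions (plus the supported Node.js connection options) passed to the MongoClient constructor, and the driver exposes it read-only on the client. The connection string and values below are placeholders.

        import { MongoClient, type MongoOptions } from 'mongodb';

        // The client parses MongoClientOptions and the connection string into MongoOptions.
        const client = new MongoClient('mongodb://localhost:27017/?maxPoolSize=20');

        // client.options is the read-only, resolved MongoOptions object.
        const options: Readonly<MongoOptions> = client.options;
        console.log(options.maxPoolSize);                   // 20, parsed from the URI
        console.log(options.hosts.map(h => h.toString()));  // resolved HostAddress list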

    Properties

    allowPartialTrustChain?: boolean
    ALPNProtocols?: Uint8Array | string[] | Uint8Array[]

    An array of strings or a Buffer naming possible ALPN protocols. (Protocols should be ordered by their priority.)

    appName?: string
    autoEncryption: AutoEncryptionOptions

    Optionally enable in-use auto encryption

    Automatic encryption is an enterprise only feature that only applies to operations on a collection. Automatic encryption is not supported for operations on a database or view, and operations that are not bypassed will result in error (see libmongocrypt: Auto Encryption Allow-List). To bypass automatic encryption for all operations, set bypassAutoEncryption=true in AutoEncryptionOpts.

    Automatic encryption requires the authenticated user to have the listCollections privilege action.

    An internal MongoClient is created if any of the following are true:

  • AutoEncryptionOptions.keyVaultClient is not passed.
  • AutoEncryptionOptions.bypassAutomaticEncryption is false.

    If an internal MongoClient is created, it is configured with the same options as the parent MongoClient except minPoolSize is set to 0 and AutoEncryptionOptions is omitted.
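
    A minimal sketch of wiring these options together; it assumes the optional mongodb-client-encryption dependency is installed and uses a throwaway local KMS key, which is suitable only for testing.

        import { randomBytes } from 'crypto';
        import { MongoClient } from 'mongodb';

        // Local KMS provider with a randomly generated 96-byte master key: test-only setup.
        const localMasterKey = randomBytes(96);

        const client = new MongoClient('mongodb://localhost:27017', {
          autoEncryption: {
            keyVaultNamespace: 'encryption.__keyVault',
            kmsProviders: { local: { key: localMasterKey } },
            // Decrypt automatically but skip automatic encryption on writes.
            bypassAutoEncryption: true,
          },
        });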

    autoSelectFamily?: boolean

    v18.13.0

    autoSelectFamilyAttemptTimeout?: number

    v18.13.0

    ca?: string | Buffer | (string | Buffer)[]

    Optionally override the trusted CA certificates. Default is to trust the well-known CAs curated by Mozilla. Mozilla's CAs are completely replaced when CAs are explicitly specified using this option.

    ciphers?: string

    Cipher suite specification, replacing the default. For more information, see modifying the default cipher suite. Permitted ciphers can be obtained via tls.getCiphers(). Cipher names must be uppercased in order for OpenSSL to accept them.

    compressors: (
        | "none"
        | "snappy"
        | "zlib"
        | "zstd")[]
    connectTimeoutMS: number

    The time in milliseconds to attempt a connection before timing out.
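
    A sketch of enabling wire compression alongside the connect timeout; zstd and snappy typically require their optional peer packages (@mongodb-js/zstd, snappy) to be installed, and the values below are illustrative.

        import { MongoClient } from 'mongodb';

        const client = new MongoClient('mongodb://localhost:27017', {
          // Offered to the server in this order; the first mutually supported one wins.
          compressors: ['zstd', 'zlib'],
          zlibCompressionLevel: 6,
          connectTimeoutMS: 10_000,
        });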

    credentials?: MongoCredentials
    crl?: string | Buffer | (string | Buffer)[]

    PEM formatted CRLs (Certificate Revocation Lists).

    dbName: string
    directConnection: boolean

    Allow a driver to force a Single topology type with a connection string containing one host

    driverInfo: DriverInfo

    Allows a wrapping driver to amend the client metadata generated by the driver to include information about the wrapping driver
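
    For example, a wrapping library might append its own identity to the handshake metadata; the name, version, and platform below are placeholders.

        import { MongoClient } from 'mongodb';

        // Appended to the driver's own metadata (driver name/version, platform, etc.).
        const client = new MongoClient('mongodb://localhost:27017', {
          driverInfo: { name: 'my-odm', version: '1.2.3', platform: 'node-18' },
        });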

    ecdhCurve?: string

    A string describing a named curve or a colon separated list of curve NIDs or names, for example P-521:P-384:P-256, to use for ECDH key agreement. Set to auto to select the curve automatically. Use crypto.getCurves() to obtain a list of available curve names. On recent releases, openssl ecparam -list_curves will also display the name and description of each available elliptic curve. Default: tls.DEFAULT_ECDH_CURVE.

    family?: number
    forceServerObjectId: boolean

    Force the server to assign _id values instead of the driver

    heartbeatFrequencyMS: number

    heartbeatFrequencyMS controls when the driver checks the state of the MongoDB deployment. Specify the interval (in milliseconds) between checks, counted from the end of the previous check until the beginning of the next one.
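
    A sketch of tuning the monitoring cadence; the values are illustrative, and smaller values mean more frequent checks and more load on the deployment.

        import { MongoClient } from 'mongodb';

        const client = new MongoClient('mongodb://localhost:27017', {
          heartbeatFrequencyMS: 10_000,    // check topology state every 10 seconds
          minHeartbeatFrequencyMS: 500,    // but never re-check sooner than 500 ms
          serverMonitoringMode: 'auto',    // 'stream', 'poll', or 'auto'
        });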

    hints?: number
    hosts: HostAddress[]
    key?: string | Buffer | (string | Buffer | KeyObject)[]

    Private keys in PEM format. PEM allows the option of private keys being encrypted. Encrypted keys will be decrypted with options.passphrase. Multiple keys using different algorithms can be provided either as an array of unencrypted key strings or buffers, or an array of objects in the form {pem: <string|buffer>[, passphrase: <string>]}. The object form can only occur in an array. object.passphrase is optional. Encrypted keys will be decrypted with object.passphrase if provided, or options.passphrase if it is not.
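
    A minimal mutual-TLS sketch using the native options; the file paths and environment variable are placeholders.

        import { readFileSync } from 'fs';
        import { MongoClient } from 'mongodb';

        const client = new MongoClient('mongodb://db.example.com:27017', {
          tls: true,
          ca: readFileSync('/etc/ssl/mongo-ca.pem'),
          cert: readFileSync('/etc/ssl/mongo-client.pem'),
          key: readFileSync('/etc/ssl/mongo-client.key'),
          passphrase: process.env.MONGO_KEY_PASSPHRASE, // decrypts the key if it is encrypted
        });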

    loadBalanced: boolean
    localAddress?: string
    localPort?: number
    localThresholdMS: number

    The size (in milliseconds) of the latency window for selecting among multiple suitable MongoDB instances.

    lookup?: LookupFunction
    maxConnecting: number

    The maximum number of connections that may be in the process of being established concurrently by the connection pool.

    maxIdleTimeMS: number

    The maximum number of milliseconds that a connection can remain idle in the pool before being removed and closed.

    maxPoolSize: number

    The maximum number of connections in the connection pool.

    metadata: ClientMetadata
    minDHSize?: number
    minHeartbeatFrequencyMS: number

    Sets the minimum heartbeat frequency. In the event that the driver has to frequently re-check a server's availability, it will wait at least this long since the previous check to avoid wasted effort.

    minPoolSize: number

    The minimum number of connections in the connection pool.
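
    A sketch of how the pool-related options fit together; the numbers are illustrative, not recommendations.

        import { MongoClient } from 'mongodb';

        const client = new MongoClient('mongodb://localhost:27017', {
          maxPoolSize: 50,            // upper bound on connections per server
          minPoolSize: 5,             // keep a few connections warm
          maxConnecting: 2,           // cap concurrent connection establishment
          maxIdleTimeMS: 60_000,      // close connections idle for over a minute
          waitQueueTimeoutMS: 5_000,  // fail checkout after 5 s with no free connection
        });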

    monitorCommands: boolean

    Enable command monitoring for this client
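
    When enabled, the client emits command monitoring events that can be observed as below; this is a logging sketch, not a production monitoring setup.

        import { MongoClient } from 'mongodb';

        const client = new MongoClient('mongodb://localhost:27017', { monitorCommands: true });

        client.on('commandStarted', e => console.log('started', e.commandName, e.requestId));
        client.on('commandSucceeded', e => console.log('succeeded', e.commandName, e.duration));
        client.on('commandFailed', e => console.log('failed', e.commandName, e.failure));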

    noDelay: boolean

    TCP Connection no delay

    passphrase?: string

    Shared passphrase used for a single private key and/or a PFX.

    pfx?: string | Buffer | (string | Buffer | PxfObject)[]

    PFX or PKCS12 encoded private key and certificate chain. pfx is an alternative to providing key and cert individually. PFX is usually encrypted; if it is, passphrase will be used to decrypt it. Multiple PFX can be provided either as an array of unencrypted PFX buffers, or an array of objects in the form {buf: <string|buffer>[, passphrase: <string>]}. The object form can only occur in an array. object.passphrase is optional. Encrypted PFX will be decrypted with object.passphrase if provided, or options.passphrase if it is not.

    pkFactory: PkFactory

    A primary key factory function for generation of custom _id keys
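
    A sketch of a custom primary key factory; generating UUID strings here is only an illustration.

        import { randomUUID } from 'crypto';
        import { MongoClient } from 'mongodb';

        // createPk() is called whenever an inserted document has no _id of its own.
        const client = new MongoClient('mongodb://localhost:27017', {
          pkFactory: { createPk: () => randomUUID() },
        });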

    proxyHost?: string
    proxyPassword?: string
    proxyPort?: number
    proxyUsername?: string
    raw: boolean

    Enabling the raw option will return a Node.js Buffer which is allocated using the allocUnsafe API. See this section from the Node.js Docs here for more detail about what "unsafe" refers to in this context.

    Please note there is a known limitation where this option cannot be used at the MongoClient level (see NODE-3946). It does work correctly at the Db, Collection, and per-operation level, the same as other BSON options.
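
    A sketch of using raw at the collection level, under the assumption that the caller deserializes (or copies) the returned bytes itself; the database and collection names are placeholders.

        import { BSON, MongoClient } from 'mongodb';

        async function readRaw(): Promise<void> {
          const client = new MongoClient('mongodb://localhost:27017');
          // raw applies here (Db/Collection/operation), not on the MongoClient itself.
          const events = client.db('app').collection('events', { raw: true });

          const doc = await events.findOne({});
          if (doc) {
            // At runtime this is a Buffer of BSON backed by allocUnsafe memory; copy or
            // deserialize it before issuing further driver calls if you need to keep it.
            console.log(BSON.deserialize(doc as unknown as Uint8Array));
          }
          await client.close();
        }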

    readConcern: ReadConcern
    readPreference: ReadPreference
    rejectUnauthorized?: boolean

    If true the server will reject any connection which is not authorized with the list of supplied CAs. This option only has an effect if requestCert is true.

    replicaSet: string

    Specifies the name of the replica set, if the mongod is a member of a replica set.

    retryReads: boolean

    Enables retryable reads.

    retryWrites: boolean

    Enable retryable writes.
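
    Both retry settings default to true in current driver versions; the sketch below only makes them explicit and shows the equivalent URI form.

        import { MongoClient } from 'mongodb';

        // Same effect as mongodb://localhost:27017/?retryReads=true&retryWrites=true
        const client = new MongoClient('mongodb://localhost:27017', {
          retryReads: true,
          retryWrites: true,
        });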

    secureContext?: SecureContext

    An optional TLS context object from tls.createSecureContext()

    secureProtocol?: string

    Legacy mechanism to select the TLS protocol version to use; it does not support independent control of the minimum and maximum version, and does not support limiting the protocol to TLSv1.3. Use minVersion and maxVersion instead.

    serverApi: ServerApi
    serverMonitoringMode: ServerMonitoringMode
    servername?: string
    serverSelectionTimeoutMS: number

    Specifies how long (in milliseconds) to block for server selection before throwing an exception.
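
    A sketch combining a bounded server selection window with the Stable API options; the values are illustrative.

        import { MongoClient, ServerApiVersion } from 'mongodb';

        const client = new MongoClient('mongodb://localhost:27017', {
          serverSelectionTimeoutMS: 5_000, // give up selecting a server after 5 s
          serverApi: { version: ServerApiVersion.v1, strict: true, deprecationErrors: true },
        });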

    session?: Buffer

    An optional Buffer instance containing a TLS session.

    socketTimeoutMS: number

    The time in milliseconds to attempt a send or receive on a socket before the attempt times out.

    srvHost?: string
    srvMaxHosts: number

    The maximum number of hosts to connect to when using an srv connection string; a setting of 0 means unlimited hosts

    srvServiceName: string

    Modifies the srv URI to look like:

    _{srvServiceName}._tcp.{hostname}.{domainname}

    Querying this DNS URI is expected to respond with SRV records
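
    For example, with a hypothetical domain the driver would query _customname._tcp.cluster0.example.com instead of the default _mongodb._tcp record.

        import { MongoClient } from 'mongodb';

        const client = new MongoClient('mongodb+srv://cluster0.example.com/app', {
          srvServiceName: 'customname',
          srvMaxHosts: 2, // use at most two of the SRV-advertised hosts (0 = no limit)
        });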

    timeoutMS?: number
    tls: boolean

    NOTE ABOUT TLS Options

    If tls is provided as an option, it is equivalent to setting the ssl option.

    NodeJS native TLS options are passed through to the socket and retain their original types.
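
    A final sketch mixing the driver-level TLS file options with timeoutMS, which bounds each operation client-side in driver versions that support it; paths and values are placeholders.

        import { MongoClient } from 'mongodb';

        const client = new MongoClient('mongodb://db.example.com:27017', {
          tls: true,                                          // same effect as ssl: true
          tlsCAFile: '/etc/ssl/mongo-ca.pem',
          tlsCertificateKeyFile: '/etc/ssl/mongo-client.pem',
          timeoutMS: 15_000,                                  // client-side operation timeout
        });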
