commit d1c8cae2c1
PLBXNebulia-Formation, 2025-11-21 09:23:11 +01:00
1417 changed files with 326736 additions and 0 deletions

View file: client_bulk_write.js

@@ -0,0 +1,51 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ClientBulkWriteOperation = void 0;
const responses_1 = require("../../cmap/wire_protocol/responses");
const utils_1 = require("../../utils");
const command_1 = require("../command");
const operation_1 = require("../operation");
/**
* Executes a single client bulk write operation within a potential batch.
* @internal
*/
class ClientBulkWriteOperation extends command_1.CommandOperation {
get commandName() {
return 'bulkWrite';
}
constructor(commandBuilder, options) {
super(undefined, options);
this.SERVER_COMMAND_RESPONSE_TYPE = responses_1.ClientBulkWriteCursorResponse;
this.commandBuilder = commandBuilder;
this.options = options;
this.ns = new utils_1.MongoDBNamespace('admin', '$cmd');
}
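/** Rewind the command builder to the start of the current batch so a retry rebuilds the same models. */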
resetBatch() {
return this.commandBuilder.resetBatch();
}
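/** A batch is only retryable when it contains no multi-document writes (updateMany/deleteMany). */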
get canRetryWrite() {
return this.commandBuilder.isBatchRetryable;
}
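/** Return the cursor response untouched; results are merged downstream by the results merger. */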
handleOk(response) {
return response;
}
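/** Build the next batch of the bulkWrite command within this connection's size limits. */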
buildCommandDocument(connection, _session) {
const command = this.commandBuilder.buildBatch(connection.description.maxMessageSizeBytes, connection.description.maxWriteBatchSize, connection.description.maxBsonObjectSize);
// Check _after_ the batch is built whether it can be retried, and override the option if it cannot.
if (!this.canRetryWrite) {
this.options.willRetryWrite = false;
}
return command;
}
}
exports.ClientBulkWriteOperation = ClientBulkWriteOperation;
// Skipping the collation as it goes on the individual ops.
(0, operation_1.defineAspects)(ClientBulkWriteOperation, [
operation_1.Aspect.WRITE_OPERATION,
operation_1.Aspect.SKIP_COLLATION,
operation_1.Aspect.CURSOR_CREATING,
operation_1.Aspect.RETRYABLE,
operation_1.Aspect.COMMAND_BATCHING,
operation_1.Aspect.SUPPORTS_RAW_DATA
]);
//# sourceMappingURL=client_bulk_write.js.map
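
A minimal sketch (not part of the commit), assuming the compiled module is importable from this directory: the builder stub and connection limits below are hypothetical, chosen only to show buildCommandDocument clearing willRetryWrite when the builder reports a non-retryable batch.

const { ClientBulkWriteOperation } = require('./client_bulk_write');

// Hypothetical builder stub: pretend the batch contains an updateMany,
// so the builder reports it as non-retryable.
const builderStub = {
  isBatchRetryable: false,
  resetBatch: () => true,
  buildBatch: () => ({ bulkWrite: 1 })
};
const operation = new ClientBulkWriteOperation(builderStub, { willRetryWrite: true });
// Wire limits as a server might report them (illustrative values).
const connectionStub = {
  description: {
    maxMessageSizeBytes: 48000000,
    maxWriteBatchSize: 100000,
    maxBsonObjectSize: 16777216
  }
};
operation.buildCommandDocument(connectionStub, undefined);
console.log(operation.options.willRetryWrite); // false: multi-updates cannot be retried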

View file: client_bulk_write.js.map

@@ -0,0 +1 @@
{"version":3,"file":"client_bulk_write.js","sourceRoot":"","sources":["../../../src/operations/client_bulk_write/client_bulk_write.ts"],"names":[],"mappings":";;;AACA,kEAAmF;AAEnF,uCAA+C;AAC/C,wCAA8C;AAC9C,4CAAqD;AAIrD;;;GAGG;AACH,MAAa,wBAAyB,SAAQ,0BAA+C;IAM3F,IAAa,WAAW;QACtB,OAAO,WAAoB,CAAC;IAC9B,CAAC;IAED,YAAY,cAA6C,EAAE,OAA+B;QACxF,KAAK,CAAC,SAAS,EAAE,OAAO,CAAC,CAAC;QAVnB,iCAA4B,GAAG,yCAA6B,CAAC;QAWpE,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;QACrC,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;QACvB,IAAI,CAAC,EAAE,GAAG,IAAI,wBAAgB,CAAC,OAAO,EAAE,MAAM,CAAC,CAAC;IAClD,CAAC;IAEQ,UAAU;QACjB,OAAO,IAAI,CAAC,cAAc,CAAC,UAAU,EAAE,CAAC;IAC1C,CAAC;IAED,IAAa,aAAa;QACxB,OAAO,IAAI,CAAC,cAAc,CAAC,gBAAgB,CAAC;IAC9C,CAAC;IAEQ,QAAQ,CACf,QAAgE;QAEhE,OAAO,QAAQ,CAAC;IAClB,CAAC;IAEQ,oBAAoB,CAC3B,UAAsB,EACtB,QAAwB;QAExB,MAAM,OAAO,GAAG,IAAI,CAAC,cAAc,CAAC,UAAU,CAC5C,UAAU,CAAC,WAAW,CAAC,mBAAmB,EAC1C,UAAU,CAAC,WAAW,CAAC,iBAAiB,EACxC,UAAU,CAAC,WAAW,CAAC,iBAAiB,CACzC,CAAC;QAEF,kFAAkF;QAClF,IAAI,CAAC,IAAI,CAAC,aAAa,EAAE,CAAC;YACxB,IAAI,CAAC,OAAO,CAAC,cAAc,GAAG,KAAK,CAAC;QACtC,CAAC;QAED,OAAO,OAAO,CAAC;IACjB,CAAC;CACF;AAhDD,4DAgDC;AAED,2DAA2D;AAC3D,IAAA,yBAAa,EAAC,wBAAwB,EAAE;IACtC,kBAAM,CAAC,eAAe;IACtB,kBAAM,CAAC,cAAc;IACrB,kBAAM,CAAC,eAAe;IACtB,kBAAM,CAAC,SAAS;IAChB,kBAAM,CAAC,gBAAgB;IACvB,kBAAM,CAAC,iBAAiB;CACzB,CAAC,CAAC"}

View file: command_builder.js

@@ -0,0 +1,340 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.buildReplaceOneOperation = exports.buildUpdateManyOperation = exports.buildUpdateOneOperation = exports.buildDeleteManyOperation = exports.buildDeleteOneOperation = exports.buildInsertOneOperation = exports.ClientBulkWriteCommandBuilder = void 0;
exports.buildOperation = buildOperation;
const bson_1 = require("../../bson");
const commands_1 = require("../../cmap/commands");
const error_1 = require("../../error");
const sort_1 = require("../../sort");
const utils_1 = require("../../utils");
/**
* The overhead in bytes for the extra fields added after command generation.
*/
const MESSAGE_OVERHEAD_BYTES = 1000;
/** @internal */
class ClientBulkWriteCommandBuilder {
/**
* Create the command builder.
* @param models - The client write models.
*/
constructor(models, options, pkFactory) {
this.models = models;
this.options = options;
this.pkFactory = pkFactory ?? utils_1.DEFAULT_PK_FACTORY;
this.currentModelIndex = 0;
this.previousModelIndex = 0;
this.lastOperations = [];
this.isBatchRetryable = true;
}
/**
* Gets the errorsOnly value for the command, which is the inverse of the
* user provided verboseResults option. Defaults to true.
*/
get errorsOnly() {
if ('verboseResults' in this.options) {
return !this.options.verboseResults;
}
return true;
}
/**
* Determines if there is another batch to process.
* @returns True if not all batches have been built.
*/
hasNextBatch() {
return this.currentModelIndex < this.models.length;
}
/**
* When a command must be retried, reset the current model
* index back to the value it had when this batch started.
*/
resetBatch() {
this.currentModelIndex = this.previousModelIndex;
return true;
}
/**
* Build a single batch of a client bulk write command.
* @param maxMessageSizeBytes - The max message size in bytes.
* @param maxWriteBatchSize - The max write batch size.
* @param maxBsonObjectSize - The max BSON object size in bytes.
* @returns The client bulk write command.
*/
buildBatch(maxMessageSizeBytes, maxWriteBatchSize, maxBsonObjectSize) {
// Start by assuming the batch has no multi-updates and is therefore
// retryable; the flag is cleared if any are found below.
this.isBatchRetryable = true;
let commandLength = 0;
let currentNamespaceIndex = 0;
const command = this.baseCommand();
const namespaces = new Map();
// In the case of retries we need to mark where we started this batch.
this.previousModelIndex = this.currentModelIndex;
while (this.currentModelIndex < this.models.length) {
const model = this.models[this.currentModelIndex];
const ns = model.namespace;
const nsIndex = namespaces.get(ns);
// Multi updates are not retryable.
if (model.name === 'deleteMany' || model.name === 'updateMany') {
this.isBatchRetryable = false;
}
if (nsIndex != null) {
// Build the operation and serialize it to get the bytes buffer.
const operation = buildOperation(model, nsIndex, this.pkFactory, this.options);
let operationBuffer;
try {
operationBuffer = bson_1.BSON.serialize(operation);
}
catch (cause) {
throw new error_1.MongoInvalidArgumentError(`Could not serialize operation to BSON`, { cause });
}
validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
// Check if the operation buffer can fit in the command. If it can,
// then add the operation to the document sequence and increment the
// current length as long as the ops don't exceed the maxWriteBatchSize.
if (commandLength + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength = MESSAGE_OVERHEAD_BYTES + command.ops.push(operation, operationBuffer);
// Increment the builder's current model index.
this.currentModelIndex++;
}
else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
}
else {
// The namespace is not already in the nsInfo so we will set it in the map, and
// construct our nsInfo and ops documents and buffers.
namespaces.set(ns, currentNamespaceIndex);
const nsInfo = { ns: ns };
const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory, this.options);
let nsInfoBuffer;
let operationBuffer;
try {
nsInfoBuffer = bson_1.BSON.serialize(nsInfo);
operationBuffer = bson_1.BSON.serialize(operation);
}
catch (cause) {
throw new error_1.MongoInvalidArgumentError(`Could not serialize ns info to BSON`, { cause });
}
validateBufferSize('nsInfo', nsInfoBuffer, maxBsonObjectSize);
validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
// Check if the operation and nsInfo buffers can fit in the command. If they
// can, then add the operation and nsInfo to their respective document
// sequences and increment the current length as long as the ops don't exceed
// the maxWriteBatchSize.
if (commandLength + nsInfoBuffer.length + operationBuffer.length < maxMessageSizeBytes &&
command.ops.documents.length < maxWriteBatchSize) {
// Pushing to the ops document sequence returns the total byte length of the document sequence.
commandLength =
MESSAGE_OVERHEAD_BYTES +
command.nsInfo.push(nsInfo, nsInfoBuffer) +
command.ops.push(operation, operationBuffer);
// We've added a new namespace, increment the namespace index.
currentNamespaceIndex++;
// Increment the builder's current model index.
this.currentModelIndex++;
}
else {
// The operation cannot fit in the current command and will need to
// go in the next batch. Exit the loop.
break;
}
}
}
// Set the last operations and return the command.
this.lastOperations = command.ops.documents;
return command;
}
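/** Create the skeleton bulkWrite command with the shared top-level options applied. */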
baseCommand() {
const command = {
bulkWrite: 1,
errorsOnly: this.errorsOnly,
ordered: this.options.ordered ?? true,
ops: new commands_1.DocumentSequence('ops'),
nsInfo: new commands_1.DocumentSequence('nsInfo')
};
// Add bypassDocumentValidation if it was present in the options.
if (this.options.bypassDocumentValidation != null) {
command.bypassDocumentValidation = this.options.bypassDocumentValidation;
}
// Add let if it was present in the options.
if (this.options.let) {
command.let = this.options.let;
}
// we check for undefined specifically here to allow falsy values
// eslint-disable-next-line no-restricted-syntax
if (this.options.comment !== undefined) {
command.comment = this.options.comment;
}
return command;
}
}
exports.ClientBulkWriteCommandBuilder = ClientBulkWriteCommandBuilder;
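/** Throw if a serialized ops or nsInfo document exceeds the server's maxBsonObjectSize. */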
function validateBufferSize(name, buffer, maxBsonObjectSize) {
if (buffer.length > maxBsonObjectSize) {
throw new error_1.MongoInvalidArgumentError(`Client bulk write operation ${name} of length ${buffer.length} exceeds the max bson object size of ${maxBsonObjectSize}`);
}
}
/**
* Build the insert one operation.
* @param model - The insert one model.
* @param index - The namespace index.
* @returns the operation.
*/
const buildInsertOneOperation = (model, index, pkFactory) => {
const document = {
insert: index,
document: model.document
};
document.document._id = model.document._id ?? pkFactory.createPk();
return document;
};
exports.buildInsertOneOperation = buildInsertOneOperation;
/**
* Build the delete one operation.
* @param model - The delete one model.
* @param index - The namespace index.
* @returns the operation.
*/
const buildDeleteOneOperation = (model, index) => {
return createDeleteOperation(model, index, false);
};
exports.buildDeleteOneOperation = buildDeleteOneOperation;
/**
* Build the delete many operation.
* @param model - The delete many model.
* @param index - The namespace index.
* @returns the operation.
*/
const buildDeleteManyOperation = (model, index) => {
return createDeleteOperation(model, index, true);
};
exports.buildDeleteManyOperation = buildDeleteManyOperation;
/**
* Creates a delete operation based on the parameters.
*/
function createDeleteOperation(model, index, multi) {
const document = {
delete: index,
multi: multi,
filter: model.filter
};
if (model.hint) {
document.hint = model.hint;
}
if (model.collation) {
document.collation = model.collation;
}
return document;
}
/**
* Build the update one operation.
* @param model - The update one model.
* @param index - The namespace index.
* @returns the operation.
*/
const buildUpdateOneOperation = (model, index, options) => {
return createUpdateOperation(model, index, false, options);
};
exports.buildUpdateOneOperation = buildUpdateOneOperation;
/**
* Build the update many operation.
* @param model - The update many model.
* @param index - The namespace index.
* @returns the operation.
*/
const buildUpdateManyOperation = (model, index, options) => {
return createUpdateOperation(model, index, true, options);
};
exports.buildUpdateManyOperation = buildUpdateManyOperation;
/**
* Validate the update document.
* @param update - The update document.
*/
function validateUpdate(update, options) {
if (!(0, utils_1.hasAtomicOperators)(update, options)) {
throw new error_1.MongoAPIError('Client bulk write update models must only contain atomic modifiers (start with $) and must not be empty.');
}
}
/**
* Creates an update operation based on the parameters.
*/
function createUpdateOperation(model, index, multi, options) {
// Update documents provided in UpdateOne and UpdateMany write models are
// required only to contain atomic modifiers (i.e. keys that start with "$").
// Drivers MUST throw an error if an update document is empty or if the
// document's first key does not start with "$".
validateUpdate(model.update, options);
const document = {
update: index,
multi: multi,
filter: model.filter,
updateMods: model.update
};
if (model.hint) {
document.hint = model.hint;
}
if (model.upsert) {
document.upsert = model.upsert;
}
if (model.arrayFilters) {
document.arrayFilters = model.arrayFilters;
}
if (model.collation) {
document.collation = model.collation;
}
if (!multi && 'sort' in model && model.sort != null) {
document.sort = (0, sort_1.formatSort)(model.sort);
}
return document;
}
/**
* Build the replace one operation.
* @param model - The replace one model.
* @param index - The namespace index.
* @returns the operation.
*/
const buildReplaceOneOperation = (model, index) => {
if ((0, utils_1.hasAtomicOperators)(model.replacement)) {
throw new error_1.MongoAPIError('Client bulk write replace models must not contain atomic modifiers (start with $) and must not be empty.');
}
const document = {
update: index,
multi: false,
filter: model.filter,
updateMods: model.replacement
};
if (model.hint) {
document.hint = model.hint;
}
if (model.upsert) {
document.upsert = model.upsert;
}
if (model.collation) {
document.collation = model.collation;
}
if (model.sort != null) {
document.sort = (0, sort_1.formatSort)(model.sort);
}
return document;
};
exports.buildReplaceOneOperation = buildReplaceOneOperation;
/** @internal */
function buildOperation(model, index, pkFactory, options) {
switch (model.name) {
case 'insertOne':
return (0, exports.buildInsertOneOperation)(model, index, pkFactory);
case 'deleteOne':
return (0, exports.buildDeleteOneOperation)(model, index);
case 'deleteMany':
return (0, exports.buildDeleteManyOperation)(model, index);
case 'updateOne':
return (0, exports.buildUpdateOneOperation)(model, index, options);
case 'updateMany':
return (0, exports.buildUpdateManyOperation)(model, index, options);
case 'replaceOne':
return (0, exports.buildReplaceOneOperation)(model, index);
}
}
//# sourceMappingURL=command_builder.js.map
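
A small sketch (not part of the commit) of the batch splitting above, assuming the compiled module is importable from this directory; the namespace, documents, and wire limits are illustrative. Forcing maxWriteBatchSize to 1 makes each buildBatch call emit exactly one model, and errorsOnly comes back as the inverse of verboseResults.

const { ClientBulkWriteCommandBuilder } = require('./command_builder');

const models = [
  { name: 'insertOne', namespace: 'test.coll', document: { a: 1 } },
  { name: 'insertOne', namespace: 'test.coll', document: { a: 2 } }
];
const builder = new ClientBulkWriteCommandBuilder(models, { verboseResults: true });

while (builder.hasNextBatch()) {
  // Arguments: maxMessageSizeBytes, maxWriteBatchSize, maxBsonObjectSize (illustrative).
  const command = builder.buildBatch(48000000, 1, 16777216);
  console.log(command.ops.documents.length, command.errorsOnly); // 1 false
}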

File diff suppressed because one or more lines are too long (command_builder.js.map)

View file: common.js

@@ -0,0 +1,3 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=common.js.map

View file: common.js.map

@@ -0,0 +1 @@
{"version":3,"file":"common.js","sourceRoot":"","sources":["../../../src/operations/client_bulk_write/common.ts"],"names":[],"mappings":""}

View file: executor.js

@@ -0,0 +1,120 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ClientBulkWriteExecutor = void 0;
const abstract_cursor_1 = require("../../cursor/abstract_cursor");
const client_bulk_write_cursor_1 = require("../../cursor/client_bulk_write_cursor");
const error_1 = require("../../error");
const timeout_1 = require("../../timeout");
const utils_1 = require("../../utils");
const write_concern_1 = require("../../write_concern");
const execute_operation_1 = require("../execute_operation");
const client_bulk_write_1 = require("./client_bulk_write");
const command_builder_1 = require("./command_builder");
const results_merger_1 = require("./results_merger");
/**
* Responsible for executing a client bulk write.
* @internal
*/
class ClientBulkWriteExecutor {
/**
* Instantiate the executor.
* @param client - The mongo client.
* @param operations - The user supplied bulk write models.
* @param options - The bulk write options.
*/
constructor(client, operations, options) {
if (operations.length === 0) {
throw new error_1.MongoClientBulkWriteExecutionError('No client bulk write models were provided.');
}
this.client = client;
this.operations = operations;
this.options = {
ordered: true,
bypassDocumentValidation: false,
verboseResults: false,
...options
};
// If no write concern was provided, we inherit one from the client.
if (!this.options.writeConcern) {
this.options.writeConcern = write_concern_1.WriteConcern.fromOptions(this.client.s.options);
}
if (this.options.writeConcern?.w === 0) {
if (this.options.verboseResults) {
throw new error_1.MongoInvalidArgumentError('Cannot request unacknowledged write concern and verbose results');
}
if (this.options.ordered) {
throw new error_1.MongoInvalidArgumentError('Cannot request unacknowledged write concern and ordered writes');
}
}
}
/**
* Execute the client bulk write. Will split commands into batches and exhaust the cursors
* for each, then merge the results into one.
* @returns The result.
*/
async execute() {
// The command builder takes the user-provided models and potentially splits
// them into multiple commands due to size limits.
const pkFactory = this.client.s.options.pkFactory;
const commandBuilder = new command_builder_1.ClientBulkWriteCommandBuilder(this.operations, this.options, pkFactory);
// Unacknowledged writes need to execute all batches and return { ok: 1 }.
const resolvedOptions = (0, utils_1.resolveTimeoutOptions)(this.client, this.options);
const context = timeout_1.TimeoutContext.create(resolvedOptions);
if (this.options.writeConcern?.w === 0) {
while (commandBuilder.hasNextBatch()) {
const operation = new client_bulk_write_1.ClientBulkWriteOperation(commandBuilder, this.options);
await (0, execute_operation_1.executeOperation)(this.client, operation, context);
}
return results_merger_1.ClientBulkWriteResultsMerger.unacknowledged();
}
else {
const resultsMerger = new results_merger_1.ClientBulkWriteResultsMerger(this.options);
// For each command we will create and exhaust a cursor for the results.
while (commandBuilder.hasNextBatch()) {
const cursorContext = new abstract_cursor_1.CursorTimeoutContext(context, Symbol());
const options = {
...this.options,
timeoutContext: cursorContext,
...(resolvedOptions.timeoutMS != null && { timeoutMode: abstract_cursor_1.CursorTimeoutMode.LIFETIME })
};
const cursor = new client_bulk_write_cursor_1.ClientBulkWriteCursor(this.client, commandBuilder, options);
try {
await resultsMerger.merge(cursor);
}
catch (error) {
// Write concern errors are recorded in the writeConcernErrors field on MongoClientBulkWriteError.
// When a write concern error is encountered, it should not terminate execution of the bulk write
// for either ordered or unordered bulk writes. However, drivers MUST throw an exception at the end
// of execution if any write concern errors were observed.
if (error instanceof error_1.MongoServerError && !(error instanceof error_1.MongoClientBulkWriteError)) {
// Server side errors need to be wrapped inside a MongoClientBulkWriteError, where the root
// cause is the error property and a partial result is to be included.
const bulkWriteError = new error_1.MongoClientBulkWriteError({
message: 'Mongo client bulk write encountered an error during execution'
});
bulkWriteError.cause = error;
bulkWriteError.partialResult = resultsMerger.bulkWriteResult;
throw bulkWriteError;
}
else {
// Client side errors are just thrown.
throw error;
}
}
}
// If we have write concern errors or unordered write errors at the end we throw.
if (resultsMerger.writeConcernErrors.length > 0 || resultsMerger.writeErrors.size > 0) {
const error = new error_1.MongoClientBulkWriteError({
message: 'Mongo client bulk write encountered errors during execution.'
});
error.writeConcernErrors = resultsMerger.writeConcernErrors;
error.writeErrors = resultsMerger.writeErrors;
error.partialResult = resultsMerger.bulkWriteResult;
throw error;
}
return resultsMerger.bulkWriteResult;
}
}
}
exports.ClientBulkWriteExecutor = ClientBulkWriteExecutor;
//# sourceMappingURL=executor.js.map
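
For orientation, a hedged usage sketch (not part of the commit): this executor backs the driver's public MongoClient.bulkWrite API, so end users drive it roughly as follows. The connection string and namespace are placeholders, and the underlying bulkWrite server command requires MongoDB 8.0+.

const { MongoClient } = require('mongodb');

async function main() {
  const client = new MongoClient('mongodb://localhost:27017');
  try {
    // Models carry their own namespace, so one call can span collections.
    const result = await client.bulkWrite(
      [
        { namespace: 'test.people', name: 'insertOne', document: { name: 'Ada' } },
        { namespace: 'test.people', name: 'updateOne', filter: { name: 'Ada' }, update: { $set: { seen: true } } }
      ],
      { ordered: true, verboseResults: true }
    );
    console.log(result.insertedCount, result.modifiedCount);
  } finally {
    await client.close();
  }
}
main().catch(console.error);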

View file: executor.js.map

@@ -0,0 +1 @@
{"version":3,"file":"executor.js","sourceRoot":"","sources":["../../../src/operations/client_bulk_write/executor.ts"],"names":[],"mappings":";;;AACA,kEAAuF;AACvF,oFAA8E;AAC9E,uCAKqB;AAErB,2CAA+C;AAC/C,uCAAoD;AACpD,uDAAmD;AACnD,4DAAwD;AACxD,2DAA+D;AAC/D,uDAAkE;AAMlE,qDAAgE;AAEhE;;;GAGG;AACH,MAAa,uBAAuB;IAKlC;;;;;OAKG;IACH,YACE,MAAmB,EACnB,UAA4D,EAC5D,OAAgC;QAEhC,IAAI,UAAU,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC5B,MAAM,IAAI,0CAAkC,CAAC,4CAA4C,CAAC,CAAC;QAC7F,CAAC;QAED,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;QACrB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,OAAO,GAAG;YACb,OAAO,EAAE,IAAI;YACb,wBAAwB,EAAE,KAAK;YAC/B,cAAc,EAAE,KAAK;YACrB,GAAG,OAAO;SACX,CAAC;QAEF,oEAAoE;QACpE,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,YAAY,EAAE,CAAC;YAC/B,IAAI,CAAC,OAAO,CAAC,YAAY,GAAG,4BAAY,CAAC,WAAW,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC;QAC9E,CAAC;QAED,IAAI,IAAI,CAAC,OAAO,CAAC,YAAY,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC;YACvC,IAAI,IAAI,CAAC,OAAO,CAAC,cAAc,EAAE,CAAC;gBAChC,MAAM,IAAI,iCAAyB,CACjC,iEAAiE,CAClE,CAAC;YACJ,CAAC;YAED,IAAI,IAAI,CAAC,OAAO,CAAC,OAAO,EAAE,CAAC;gBACzB,MAAM,IAAI,iCAAyB,CACjC,gEAAgE,CACjE,CAAC;YACJ,CAAC;QACH,CAAC;IACH,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,OAAO;QACX,uFAAuF;QACvF,sCAAsC;QACtC,MAAM,SAAS,GAAG,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,SAAS,CAAC;QAClD,MAAM,cAAc,GAAG,IAAI,+CAA6B,CACtD,IAAI,CAAC,UAAU,EACf,IAAI,CAAC,OAAO,EACZ,SAAS,CACV,CAAC;QACF,wEAAwE;QACxE,MAAM,eAAe,GAAG,IAAA,6BAAqB,EAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,OAAO,CAAC,CAAC;QACzE,MAAM,OAAO,GAAG,wBAAc,CAAC,MAAM,CAAC,eAAe,CAAC,CAAC;QAEvD,IAAI,IAAI,CAAC,OAAO,CAAC,YAAY,EAAE,CAAC,KAAK,CAAC,EAAE,CAAC;YACvC,OAAO,cAAc,CAAC,YAAY,EAAE,EAAE,CAAC;gBACrC,MAAM,SAAS,GAAG,IAAI,4CAAwB,CAAC,cAAc,EAAE,IAAI,CAAC,OAAO,CAAC,CAAC;gBAC7E,MAAM,IAAA,oCAAgB,EAAC,IAAI,CAAC,MAAM,EAAE,SAAS,EAAE,OAAO,CAAC,CAAC;YAC1D,CAAC;YACD,OAAO,6CAA4B,CAAC,cAAc,EAAE,CAAC;QACvD,CAAC;aAAM,CAAC;YACN,MAAM,aAAa,GAAG,IAAI,6CAA4B,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;YACrE,0EAA0E;YAC1E,OAAO,cAAc,CAAC,YAAY,EAAE,EAAE,CAAC;gBACrC,MAAM,aAAa,GAAG,IAAI,sCAAoB,CAAC,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;gBAClE,MAAM,OAAO,GAAG;oBACd,GAAG,IAAI,CAAC,OAAO;oBACf,cAAc,EAAE,aAAa;oBAC7B,GAAG,CAAC,eAAe,CAAC,SAAS,IAAI,IAAI,IAAI,EAAE,WAAW,EAAE,mCAAiB,CAAC,QAAQ,EAAE,CAAC;iBACtF,CAAC;gBACF,MAAM,MAAM,GAAG,IAAI,gDAAqB,CAAC,IAAI,CAAC,MAAM,EAAE,cAAc,EAAE,OAAO,CAAC,CAAC;gBAC/E,IAAI,CAAC;oBACH,MAAM,aAAa,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;gBACpC,CAAC;gBAAC,OAAO,KAAK,EAAE,CAAC;oBACf,kGAAkG;oBAClG,iGAAiG;oBACjG,mGAAmG;oBACnG,0DAA0D;oBAC1D,IAAI,KAAK,YAAY,wBAAgB,IAAI,CAAC,CAAC,KAAK,YAAY,iCAAyB,CAAC,EAAE,CAAC;wBACvF,2FAA2F;wBAC3F,sEAAsE;wBACtE,MAAM,cAAc,GAAG,IAAI,iCAAyB,CAAC;4BACnD,OAAO,EAAE,+DAA+D;yBACzE,CAAC,CAAC;wBACH,cAAc,CAAC,KAAK,GAAG,KAAK,CAAC;wBAC7B,cAAc,CAAC,aAAa,GAAG,aAAa,CAAC,eAAe,CAAC;wBAC7D,MAAM,cAAc,CAAC;oBACvB,CAAC;yBAAM,CAAC;wBACN,sCAAsC;wBACtC,MAAM,KAAK,CAAC;oBACd,CAAC;gBACH,CAAC;YACH,CAAC;YAED,iFAAiF;YACjF,IAAI,aAAa,CAAC,kBAAkB,CAAC,MAAM,GAAG,CAAC,IAAI,aAAa,CAAC,WAAW,CAAC,IAAI,GAAG,CAAC,EAAE,CAAC;gBACtF,MAAM,KAAK,GAAG,IAAI,iCAAyB,CAAC;oBAC1C,OAAO,EAAE,8DAA8D;iBACxE,CAAC,CAAC;gBACH,KAAK,CAAC,kBAAkB,GAAG,aAAa,CAAC,kBAAkB,CAAC;gBAC5D,KAAK,CAAC,WAAW,GAAG,aAAa,CAAC,WAAW,CAAC;gBAC9C,KAAK,CAAC,aAAa,GAAG,aAAa,CAAC,eAAe,CAAC;gBACpD,MAAM,KAAK,CAAC;YACd,CAAC;YAED,OAAO,aAAa,CAAC,eAAe,CAAC;QACvC,CAAC;IACH,CAAC;CACF;AAzHD,0DAyHC"}

View file: results_merger.js

@@ -0,0 +1,204 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ClientBulkWriteResultsMerger = void 0;
const __1 = require("../..");
const error_1 = require("../../error");
/**
* Unacknowledged bulk writes always produce the same result.
*/
const UNACKNOWLEDGED = {
acknowledged: false,
insertedCount: 0,
upsertedCount: 0,
matchedCount: 0,
modifiedCount: 0,
deletedCount: 0,
insertResults: undefined,
updateResults: undefined,
deleteResults: undefined
};
/**
* Merges client bulk write cursor responses together into a single result.
* @internal
*/
class ClientBulkWriteResultsMerger {
/**
* @returns The standard unacknowledged bulk write result.
*/
static unacknowledged() {
return UNACKNOWLEDGED;
}
/**
* Instantiate the merger.
* @param options - The options.
*/
constructor(options) {
this.options = options;
this.currentBatchOffset = 0;
this.writeConcernErrors = [];
this.writeErrors = new Map();
this.result = {
acknowledged: true,
insertedCount: 0,
upsertedCount: 0,
matchedCount: 0,
modifiedCount: 0,
deletedCount: 0,
insertResults: undefined,
updateResults: undefined,
deleteResults: undefined
};
if (options.verboseResults) {
this.result.insertResults = new Map();
this.result.updateResults = new Map();
this.result.deleteResults = new Map();
}
}
/**
* Get the bulk write result object.
*/
get bulkWriteResult() {
return {
acknowledged: this.result.acknowledged,
insertedCount: this.result.insertedCount,
upsertedCount: this.result.upsertedCount,
matchedCount: this.result.matchedCount,
modifiedCount: this.result.modifiedCount,
deletedCount: this.result.deletedCount,
insertResults: this.result.insertResults,
updateResults: this.result.updateResults,
deleteResults: this.result.deleteResults
};
}
/**
* Merge the results in the cursor into the existing result.
* @param cursor - The client bulk write cursor whose responses are merged.
* @returns The current result.
*/
async merge(cursor) {
let writeConcernErrorResult;
try {
for await (const document of cursor) {
// Only add to maps if ok: 1
if (document.ok === 1) {
if (this.options.verboseResults) {
this.processDocument(cursor, document);
}
}
else {
// If an individual write error is encountered during an ordered bulk write, drivers MUST
// record the error in writeErrors and immediately throw the exception. Otherwise, drivers
// MUST continue to iterate the results cursor and execute any further bulkWrite batches.
if (this.options.ordered) {
const error = new error_1.MongoClientBulkWriteError({
message: 'Mongo client ordered bulk write encountered a write error.'
});
error.writeErrors.set(document.idx + this.currentBatchOffset, {
code: document.code,
message: document.errmsg
});
error.partialResult = this.result;
throw error;
}
else {
this.writeErrors.set(document.idx + this.currentBatchOffset, {
code: document.code,
message: document.errmsg
});
}
}
}
}
catch (error) {
if (error instanceof __1.MongoWriteConcernError) {
const result = error.result;
writeConcernErrorResult = {
insertedCount: result.nInserted,
upsertedCount: result.nUpserted,
matchedCount: result.nMatched,
modifiedCount: result.nModified,
deletedCount: result.nDeleted,
writeConcernError: result.writeConcernError
};
if (this.options.verboseResults && result.cursor.firstBatch) {
for (const document of result.cursor.firstBatch) {
if (document.ok === 1) {
this.processDocument(cursor, document);
}
}
}
}
else {
throw error;
}
}
finally {
// Update the counts from the cursor response.
if (cursor.response) {
const response = cursor.response;
this.incrementCounts(response);
}
// Increment the batch offset.
this.currentBatchOffset += cursor.operations.length;
}
// If a write concern error was encountered, fold its counts in and record it.
if (writeConcernErrorResult) {
const writeConcernError = writeConcernErrorResult.writeConcernError;
this.incrementCounts(writeConcernErrorResult);
this.writeConcernErrors.push({
code: writeConcernError.code,
message: writeConcernError.errmsg
});
}
return this.result;
}
/**
* Process an individual document in the results.
* @param cursor - The cursor.
* @param document - The document to process.
*/
processDocument(cursor, document) {
// Get the corresponding operation from the command.
const operation = cursor.operations[document.idx];
// Handle insert results.
if ('insert' in operation) {
this.result.insertResults?.set(document.idx + this.currentBatchOffset, {
insertedId: operation.document._id
});
}
// Handle update results.
if ('update' in operation) {
const result = {
matchedCount: document.n,
modifiedCount: document.nModified ?? 0,
// Check if the bulk did actually upsert.
didUpsert: document.upserted != null
};
if (document.upserted) {
result.upsertedId = document.upserted._id;
}
this.result.updateResults?.set(document.idx + this.currentBatchOffset, result);
}
// Handle delete results.
if ('delete' in operation) {
this.result.deleteResults?.set(document.idx + this.currentBatchOffset, {
deletedCount: document.n
});
}
}
/**
* Increment the result counts.
* @param document - The document with the results.
*/
incrementCounts(document) {
this.result.insertedCount += document.insertedCount;
this.result.upsertedCount += document.upsertedCount;
this.result.matchedCount += document.matchedCount;
this.result.modifiedCount += document.modifiedCount;
this.result.deletedCount += document.deletedCount;
}
}
exports.ClientBulkWriteResultsMerger = ClientBulkWriteResultsMerger;
//# sourceMappingURL=results_merger.js.map
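
A tiny sketch (not part of the commit) of the offset bookkeeping above: each batch reports indexes (document.idx) relative to itself, and currentBatchOffset, advanced by cursor.operations.length, maps them back to positions in the caller's original model array. The batch sizes here are illustrative.

let currentBatchOffset = 0;
const globalIndexes = [];
for (const batchSize of [3, 2]) {
  const idx = 1; // suppose each batch reports a write error at local index 1
  globalIndexes.push(idx + currentBatchOffset);
  currentBatchOffset += batchSize; // mirrors: this.currentBatchOffset += cursor.operations.length
}
console.log(globalIndexes); // [1, 4]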

File diff suppressed because one or more lines are too long (results_merger.js.map)