Commit d1c8cae2c1
1417 changed files with 326736 additions and 0 deletions
node_modules/mongodb/lib/sdam/common.js (generated, vendored, Normal file, 49 lines added)
@@ -0,0 +1,49 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ServerType = exports.TopologyType = exports.STATE_CONNECTED = exports.STATE_CONNECTING = exports.STATE_CLOSED = exports.STATE_CLOSING = void 0;
exports._advanceClusterTime = _advanceClusterTime;
// shared state names
exports.STATE_CLOSING = 'closing';
exports.STATE_CLOSED = 'closed';
exports.STATE_CONNECTING = 'connecting';
exports.STATE_CONNECTED = 'connected';
/**
 * An enumeration of topology types we know about
 * @public
 */
exports.TopologyType = Object.freeze({
    Single: 'Single',
    ReplicaSetNoPrimary: 'ReplicaSetNoPrimary',
    ReplicaSetWithPrimary: 'ReplicaSetWithPrimary',
    Sharded: 'Sharded',
    Unknown: 'Unknown',
    LoadBalanced: 'LoadBalanced'
});
/**
 * An enumeration of server types we know about
 * @public
 */
exports.ServerType = Object.freeze({
    Standalone: 'Standalone',
    Mongos: 'Mongos',
    PossiblePrimary: 'PossiblePrimary',
    RSPrimary: 'RSPrimary',
    RSSecondary: 'RSSecondary',
    RSArbiter: 'RSArbiter',
    RSOther: 'RSOther',
    RSGhost: 'RSGhost',
    Unknown: 'Unknown',
    LoadBalancer: 'LoadBalancer'
});
/** Shared function to determine clusterTime for a given topology or session */
function _advanceClusterTime(entity, $clusterTime) {
    if (entity.clusterTime == null) {
        entity.clusterTime = $clusterTime;
    }
    else {
        if ($clusterTime.clusterTime.greaterThan(entity.clusterTime.clusterTime)) {
            entity.clusterTime = $clusterTime;
        }
    }
}
//# sourceMappingURL=common.js.map
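// Illustrative usage sketch (not part of the vendored file): _advanceClusterTime only
// ever moves an entity's clusterTime forward. The Timestamp type here is assumed to
// come from the `bson` package, whose Timestamp exposes greaterThan():
//
//   const { Timestamp } = require('bson');
//   const session = { clusterTime: null };
//   _advanceClusterTime(session, { clusterTime: new Timestamp({ t: 10, i: 1 }) });
//   _advanceClusterTime(session, { clusterTime: new Timestamp({ t: 9, i: 9 }) });
//   // session.clusterTime still holds the { t: 10, i: 1 } document: older times never win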
node_modules/mongodb/lib/sdam/common.js.map (generated, vendored, Normal file, 1 line added)
@@ -0,0 +1 @@
{"version":3,"file":"common.js","sourceRoot":"","sources":["../../src/sdam/common.ts"],"names":[],"mappings":";;;AA8DA,kDAWC;AArED,qBAAqB;AACR,QAAA,aAAa,GAAG,SAAS,CAAC;AAC1B,QAAA,YAAY,GAAG,QAAQ,CAAC;AACxB,QAAA,gBAAgB,GAAG,YAAY,CAAC;AAChC,QAAA,eAAe,GAAG,WAAW,CAAC;AAE3C;;;GAGG;AACU,QAAA,YAAY,GAAG,MAAM,CAAC,MAAM,CAAC;IACxC,MAAM,EAAE,QAAQ;IAChB,mBAAmB,EAAE,qBAAqB;IAC1C,qBAAqB,EAAE,uBAAuB;IAC9C,OAAO,EAAE,SAAS;IAClB,OAAO,EAAE,SAAS;IAClB,YAAY,EAAE,cAAc;CACpB,CAAC,CAAC;AAKZ;;;GAGG;AACU,QAAA,UAAU,GAAG,MAAM,CAAC,MAAM,CAAC;IACtC,UAAU,EAAE,YAAY;IACxB,MAAM,EAAE,QAAQ;IAChB,eAAe,EAAE,iBAAiB;IAClC,SAAS,EAAE,WAAW;IACtB,WAAW,EAAE,aAAa;IAC1B,SAAS,EAAE,WAAW;IACtB,OAAO,EAAE,SAAS;IAClB,OAAO,EAAE,SAAS;IAClB,OAAO,EAAE,SAAS;IAClB,YAAY,EAAE,cAAc;CACpB,CAAC,CAAC;AAoBZ,+EAA+E;AAC/E,SAAgB,mBAAmB,CACjC,MAAgC,EAChC,YAAyB;IAEzB,IAAI,MAAM,CAAC,WAAW,IAAI,IAAI,EAAE,CAAC;QAC/B,MAAM,CAAC,WAAW,GAAG,YAAY,CAAC;IACpC,CAAC;SAAM,CAAC;QACN,IAAI,YAAY,CAAC,WAAW,CAAC,WAAW,CAAC,MAAM,CAAC,WAAW,CAAC,WAAW,CAAC,EAAE,CAAC;YACzE,MAAM,CAAC,WAAW,GAAG,YAAY,CAAC;QACpC,CAAC;IACH,CAAC;AACH,CAAC"}
node_modules/mongodb/lib/sdam/events.js (generated, vendored, Normal file, 146 lines added)
@@ -0,0 +1,146 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ServerHeartbeatFailedEvent = exports.ServerHeartbeatSucceededEvent = exports.ServerHeartbeatStartedEvent = exports.TopologyClosedEvent = exports.TopologyOpeningEvent = exports.TopologyDescriptionChangedEvent = exports.ServerClosedEvent = exports.ServerOpeningEvent = exports.ServerDescriptionChangedEvent = void 0;
const constants_1 = require("../constants");
/**
 * Emitted when server description changes, but does NOT include changes to the RTT.
 * @public
 * @category Event
 */
class ServerDescriptionChangedEvent {
    /** @internal */
    constructor(topologyId, address, previousDescription, newDescription) {
        this.name = constants_1.SERVER_DESCRIPTION_CHANGED;
        this.topologyId = topologyId;
        this.address = address;
        this.previousDescription = previousDescription;
        this.newDescription = newDescription;
    }
}
exports.ServerDescriptionChangedEvent = ServerDescriptionChangedEvent;
/**
 * Emitted when server is initialized.
 * @public
 * @category Event
 */
class ServerOpeningEvent {
    /** @internal */
    constructor(topologyId, address) {
        /** @internal */
        this.name = constants_1.SERVER_OPENING;
        this.topologyId = topologyId;
        this.address = address;
    }
}
exports.ServerOpeningEvent = ServerOpeningEvent;
/**
 * Emitted when server is closed.
 * @public
 * @category Event
 */
class ServerClosedEvent {
    /** @internal */
    constructor(topologyId, address) {
        /** @internal */
        this.name = constants_1.SERVER_CLOSED;
        this.topologyId = topologyId;
        this.address = address;
    }
}
exports.ServerClosedEvent = ServerClosedEvent;
/**
 * Emitted when topology description changes.
 * @public
 * @category Event
 */
class TopologyDescriptionChangedEvent {
    /** @internal */
    constructor(topologyId, previousDescription, newDescription) {
        /** @internal */
        this.name = constants_1.TOPOLOGY_DESCRIPTION_CHANGED;
        this.topologyId = topologyId;
        this.previousDescription = previousDescription;
        this.newDescription = newDescription;
    }
}
exports.TopologyDescriptionChangedEvent = TopologyDescriptionChangedEvent;
/**
 * Emitted when topology is initialized.
 * @public
 * @category Event
 */
class TopologyOpeningEvent {
    /** @internal */
    constructor(topologyId) {
        /** @internal */
        this.name = constants_1.TOPOLOGY_OPENING;
        this.topologyId = topologyId;
    }
}
exports.TopologyOpeningEvent = TopologyOpeningEvent;
/**
 * Emitted when topology is closed.
 * @public
 * @category Event
 */
class TopologyClosedEvent {
    /** @internal */
    constructor(topologyId) {
        /** @internal */
        this.name = constants_1.TOPOLOGY_CLOSED;
        this.topologyId = topologyId;
    }
}
exports.TopologyClosedEvent = TopologyClosedEvent;
/**
 * Emitted when the server monitor’s hello command is started - immediately before
 * the hello command is serialized into raw BSON and written to the socket.
 *
 * @public
 * @category Event
 */
class ServerHeartbeatStartedEvent {
    /** @internal */
    constructor(connectionId, awaited) {
        /** @internal */
        this.name = constants_1.SERVER_HEARTBEAT_STARTED;
        this.connectionId = connectionId;
        this.awaited = awaited;
    }
}
exports.ServerHeartbeatStartedEvent = ServerHeartbeatStartedEvent;
/**
 * Emitted when the server monitor’s hello succeeds.
 * @public
 * @category Event
 */
class ServerHeartbeatSucceededEvent {
    /** @internal */
    constructor(connectionId, duration, reply, awaited) {
        /** @internal */
        this.name = constants_1.SERVER_HEARTBEAT_SUCCEEDED;
        this.connectionId = connectionId;
        this.duration = duration;
        this.reply = reply ?? {};
        this.awaited = awaited;
    }
}
exports.ServerHeartbeatSucceededEvent = ServerHeartbeatSucceededEvent;
/**
 * Emitted when the server monitor’s hello fails, either with an “ok: 0” or a socket exception.
 * @public
 * @category Event
 */
class ServerHeartbeatFailedEvent {
    /** @internal */
    constructor(connectionId, duration, failure, awaited) {
        /** @internal */
        this.name = constants_1.SERVER_HEARTBEAT_FAILED;
        this.connectionId = connectionId;
        this.duration = duration;
        this.failure = failure;
        this.awaited = awaited;
    }
}
exports.ServerHeartbeatFailedEvent = ServerHeartbeatFailedEvent;
//# sourceMappingURL=events.js.map
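// Illustrative usage sketch (not part of the vendored file): these event classes are
// re-emitted on MongoClient, so SDAM activity can be observed with ordinary listeners.
// Event names assume the driver's documented SDAM event set:
//
//   const { MongoClient } = require('mongodb');
//   const client = new MongoClient('mongodb://localhost:27017');
//   client.on('serverHeartbeatSucceeded', ev =>
//       console.log(`heartbeat to ${ev.connectionId} took ${ev.duration}ms`));
//   client.on('serverDescriptionChanged', ev =>
//       console.log(`${ev.address}: ${ev.previousDescription.type} -> ${ev.newDescription.type}`));
//   client.connect().catch(console.error);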
node_modules/mongodb/lib/sdam/events.js.map (generated, vendored, Normal file, 1 line added)
@@ -0,0 +1 @@
{"version":3,"file":"events.js","sourceRoot":"","sources":["../../src/sdam/events.ts"],"names":[],"mappings":";;;AACA,4CAUsB;AAItB;;;;GAIG;AACH,MAAa,6BAA6B;IAWxC,gBAAgB;IAChB,YACE,UAAkB,EAClB,OAAe,EACf,mBAAsC,EACtC,cAAiC;QAPnC,SAAI,GAAG,sCAA0B,CAAC;QAShC,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;QACvB,IAAI,CAAC,mBAAmB,GAAG,mBAAmB,CAAC;QAC/C,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;IACvC,CAAC;CACF;AAvBD,sEAuBC;AAED;;;;GAIG;AACH,MAAa,kBAAkB;IAQ7B,gBAAgB;IAChB,YAAY,UAAkB,EAAE,OAAe;QAJ/C,gBAAgB;QAChB,SAAI,GAAG,0BAAc,CAAC;QAIpB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;CACF;AAbD,gDAaC;AAED;;;;GAIG;AACH,MAAa,iBAAiB;IAQ5B,gBAAgB;IAChB,YAAY,UAAkB,EAAE,OAAe;QAJ/C,gBAAgB;QAChB,SAAI,GAAG,yBAAa,CAAC;QAInB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;CACF;AAbD,8CAaC;AAED;;;;GAIG;AACH,MAAa,+BAA+B;IAU1C,gBAAgB;IAChB,YACE,UAAkB,EAClB,mBAAwC,EACxC,cAAmC;QAPrC,gBAAgB;QAChB,SAAI,GAAG,wCAA4B,CAAC;QAQlC,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,mBAAmB,GAAG,mBAAmB,CAAC;QAC/C,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;IACvC,CAAC;CACF;AApBD,0EAoBC;AAED;;;;GAIG;AACH,MAAa,oBAAoB;IAM/B,gBAAgB;IAChB,YAAY,UAAkB;QAJ9B,gBAAgB;QAChB,SAAI,GAAG,4BAAgB,CAAC;QAItB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;IAC/B,CAAC;CACF;AAVD,oDAUC;AAED;;;;GAIG;AACH,MAAa,mBAAmB;IAM9B,gBAAgB;IAChB,YAAY,UAAkB;QAJ9B,gBAAgB;QAChB,SAAI,GAAG,2BAAe,CAAC;QAIrB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;IAC/B,CAAC;CACF;AAVD,kDAUC;AAED;;;;;;GAMG;AACH,MAAa,2BAA2B;IAQtC,gBAAgB;IAChB,YAAY,YAAoB,EAAE,OAAgB;QAJlD,gBAAgB;QAChB,SAAI,GAAG,oCAAwB,CAAC;QAI9B,IAAI,CAAC,YAAY,GAAG,YAAY,CAAC;QACjC,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;CACF;AAbD,kEAaC;AAED;;;;GAIG;AACH,MAAa,6BAA6B;IAYxC,gBAAgB;IAChB,YAAY,YAAoB,EAAE,QAAgB,EAAE,KAAsB,EAAE,OAAgB;QAJ5F,gBAAgB;QAChB,SAAI,GAAG,sCAA0B,CAAC;QAIhC,IAAI,CAAC,YAAY,GAAG,YAAY,CAAC;QACjC,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;QACzB,IAAI,CAAC,KAAK,GAAG,KAAK,IAAI,EAAE,CAAC;QACzB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;CACF;AAnBD,sEAmBC;AAED;;;;GAIG;AACH,MAAa,0BAA0B;IAYrC,gBAAgB;IAChB,YAAY,YAAoB,EAAE,QAAgB,EAAE,OAAc,EAAE,OAAgB;QAJpF,gBAAgB;QAChB,SAAI,GAAG,mCAAuB,CAAC;QAI7B,IAAI,CAAC,YAAY,GAAG,YAAY,CAAC;QACjC,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;QACzB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;QACvB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;CACF;AAnBD,gEAmBC"}
node_modules/mongodb/lib/sdam/monitor.js (generated, vendored, Normal file, 544 lines added)
@@ -0,0 +1,544 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.RTTSampler = exports.MonitorInterval = exports.RTTPinger = exports.Monitor = exports.ServerMonitoringMode = void 0;
const timers_1 = require("timers");
const bson_1 = require("../bson");
const connect_1 = require("../cmap/connect");
const client_metadata_1 = require("../cmap/handshake/client_metadata");
const constants_1 = require("../constants");
const error_1 = require("../error");
const mongo_logger_1 = require("../mongo_logger");
const mongo_types_1 = require("../mongo_types");
const utils_1 = require("../utils");
const common_1 = require("./common");
const events_1 = require("./events");
const server_1 = require("./server");
const STATE_IDLE = 'idle';
const STATE_MONITORING = 'monitoring';
const stateTransition = (0, utils_1.makeStateMachine)({
    [common_1.STATE_CLOSING]: [common_1.STATE_CLOSING, STATE_IDLE, common_1.STATE_CLOSED],
    [common_1.STATE_CLOSED]: [common_1.STATE_CLOSED, STATE_MONITORING],
    [STATE_IDLE]: [STATE_IDLE, STATE_MONITORING, common_1.STATE_CLOSING],
    [STATE_MONITORING]: [STATE_MONITORING, STATE_IDLE, common_1.STATE_CLOSING]
});
const INVALID_REQUEST_CHECK_STATES = new Set([common_1.STATE_CLOSING, common_1.STATE_CLOSED, STATE_MONITORING]);
function isInCloseState(monitor) {
    return monitor.s.state === common_1.STATE_CLOSED || monitor.s.state === common_1.STATE_CLOSING;
}
/** @public */
exports.ServerMonitoringMode = Object.freeze({
    auto: 'auto',
    poll: 'poll',
    stream: 'stream'
});
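// Illustrative sketch (not part of the vendored file): these modes are selected through
// the client's serverMonitoringMode option (assuming the documented MongoClient option);
// 'auto' falls back to polling in FaaS environments, per useStreamingProtocol() below:
//
//   const { MongoClient } = require('mongodb');
//   const client = new MongoClient('mongodb://localhost:27017', {
//       serverMonitoringMode: 'poll' // or 'stream'; default is 'auto'
//   });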
/** @internal */
class Monitor extends mongo_types_1.TypedEventEmitter {
    constructor(server, options) {
        super();
        /** @internal */
        this.component = mongo_logger_1.MongoLoggableComponent.TOPOLOGY;
        this.on('error', utils_1.noop);
        this.server = server;
        this.connection = null;
        this.cancellationToken = new mongo_types_1.CancellationToken();
        this.cancellationToken.setMaxListeners(Infinity);
        this.monitorId = undefined;
        this.s = {
            state: common_1.STATE_CLOSED
        };
        this.address = server.description.address;
        this.options = Object.freeze({
            connectTimeoutMS: options.connectTimeoutMS ?? 10000,
            heartbeatFrequencyMS: options.heartbeatFrequencyMS ?? 10000,
            minHeartbeatFrequencyMS: options.minHeartbeatFrequencyMS ?? 500,
            serverMonitoringMode: options.serverMonitoringMode
        });
        this.isRunningInFaasEnv = (0, client_metadata_1.getFAASEnv)() != null;
        this.mongoLogger = this.server.topology.client?.mongoLogger;
        this.rttSampler = new RTTSampler(10);
        const cancellationToken = this.cancellationToken;
        // TODO: refactor this to pull it directly from the pool, requires new ConnectionPool integration
        const connectOptions = {
            id: '<monitor>',
            generation: server.pool.generation,
            cancellationToken,
            hostAddress: server.description.hostAddress,
            ...options,
            // force BSON serialization options
            raw: false,
            useBigInt64: false,
            promoteLongs: true,
            promoteValues: true,
            promoteBuffers: true
        };
        // ensure no authentication is used for monitoring
        delete connectOptions.credentials;
        if (connectOptions.autoEncrypter) {
            delete connectOptions.autoEncrypter;
        }
        this.connectOptions = Object.freeze(connectOptions);
    }
    connect() {
        if (this.s.state !== common_1.STATE_CLOSED) {
            return;
        }
        // start
        const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS;
        const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS;
        this.monitorId = new MonitorInterval(monitorServer(this), {
            heartbeatFrequencyMS: heartbeatFrequencyMS,
            minHeartbeatFrequencyMS: minHeartbeatFrequencyMS,
            immediate: true
        });
    }
    requestCheck() {
        if (INVALID_REQUEST_CHECK_STATES.has(this.s.state)) {
            return;
        }
        this.monitorId?.wake();
    }
    reset() {
        const topologyVersion = this.server.description.topologyVersion;
        if (isInCloseState(this) || topologyVersion == null) {
            return;
        }
        stateTransition(this, common_1.STATE_CLOSING);
        resetMonitorState(this);
        // restart monitor
        stateTransition(this, STATE_IDLE);
        // restart monitoring
        const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS;
        const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS;
        this.monitorId = new MonitorInterval(monitorServer(this), {
            heartbeatFrequencyMS: heartbeatFrequencyMS,
            minHeartbeatFrequencyMS: minHeartbeatFrequencyMS
        });
    }
    close() {
        if (isInCloseState(this)) {
            return;
        }
        stateTransition(this, common_1.STATE_CLOSING);
        resetMonitorState(this);
        // close monitor
        this.emit('close');
        stateTransition(this, common_1.STATE_CLOSED);
    }
    get roundTripTime() {
        return this.rttSampler.average();
    }
    get minRoundTripTime() {
        return this.rttSampler.min();
    }
    get latestRtt() {
        return this.rttSampler.last;
    }
    addRttSample(rtt) {
        this.rttSampler.addSample(rtt);
    }
    clearRttSamples() {
        this.rttSampler.clear();
    }
}
exports.Monitor = Monitor;
function resetMonitorState(monitor) {
    monitor.monitorId?.stop();
    monitor.monitorId = undefined;
    monitor.rttPinger?.close();
    monitor.rttPinger = undefined;
    monitor.cancellationToken.emit('cancel');
    monitor.connection?.destroy();
    monitor.connection = null;
    monitor.clearRttSamples();
}
function useStreamingProtocol(monitor, topologyVersion) {
    // If we have no topology version we always poll no matter
    // what the user provided, since the server does not support
    // the streaming protocol.
    if (topologyVersion == null)
        return false;
    const serverMonitoringMode = monitor.options.serverMonitoringMode;
    if (serverMonitoringMode === exports.ServerMonitoringMode.poll)
        return false;
    if (serverMonitoringMode === exports.ServerMonitoringMode.stream)
        return true;
    // If we are in auto mode, we need to figure out if we're in a FaaS
    // environment or not and choose the appropriate mode.
    if (monitor.isRunningInFaasEnv)
        return false;
    return true;
}
function checkServer(monitor, callback) {
    let start;
    let awaited;
    const topologyVersion = monitor.server.description.topologyVersion;
    const isAwaitable = useStreamingProtocol(monitor, topologyVersion);
    monitor.emitAndLogHeartbeat(server_1.Server.SERVER_HEARTBEAT_STARTED, monitor.server.topology.s.id, undefined, new events_1.ServerHeartbeatStartedEvent(monitor.address, isAwaitable));
    function onHeartbeatFailed(err) {
        monitor.connection?.destroy();
        monitor.connection = null;
        monitor.emitAndLogHeartbeat(server_1.Server.SERVER_HEARTBEAT_FAILED, monitor.server.topology.s.id, undefined, new events_1.ServerHeartbeatFailedEvent(monitor.address, (0, utils_1.calculateDurationInMs)(start), err, awaited));
        const error = !(err instanceof error_1.MongoError)
            ? new error_1.MongoError(error_1.MongoError.buildErrorMessage(err), { cause: err })
            : err;
        error.addErrorLabel(error_1.MongoErrorLabel.ResetPool);
        if (error instanceof error_1.MongoNetworkTimeoutError) {
            error.addErrorLabel(error_1.MongoErrorLabel.InterruptInUseConnections);
        }
        monitor.emit('resetServer', error);
        callback(err);
    }
    function onHeartbeatSucceeded(hello) {
        if (!('isWritablePrimary' in hello)) {
            // Provide hello-style response document.
            hello.isWritablePrimary = hello[constants_1.LEGACY_HELLO_COMMAND];
        }
        // NOTE: here we use the latestRtt as this measurement corresponds with the value
        // obtained for this successful heartbeat, if there is no latestRtt, then we calculate the
        // duration
        const duration = isAwaitable && monitor.rttPinger
            ? (monitor.rttPinger.latestRtt ?? (0, utils_1.calculateDurationInMs)(start))
            : (0, utils_1.calculateDurationInMs)(start);
        monitor.addRttSample(duration);
        monitor.emitAndLogHeartbeat(server_1.Server.SERVER_HEARTBEAT_SUCCEEDED, monitor.server.topology.s.id, hello.connectionId, new events_1.ServerHeartbeatSucceededEvent(monitor.address, duration, hello, isAwaitable));
        if (isAwaitable) {
            // If we are using the streaming protocol then we immediately issue another 'started'
            // event, otherwise the "check" is complete and return to the main monitor loop
            monitor.emitAndLogHeartbeat(server_1.Server.SERVER_HEARTBEAT_STARTED, monitor.server.topology.s.id, undefined, new events_1.ServerHeartbeatStartedEvent(monitor.address, true));
            // We have not actually sent an outgoing handshake, but when we get the next response we
            // want the duration to reflect the time since we last heard from the server
            start = (0, utils_1.now)();
        }
        else {
            monitor.rttPinger?.close();
            monitor.rttPinger = undefined;
            callback(undefined, hello);
        }
    }
    const { connection } = monitor;
    if (connection && !connection.closed) {
        const { serverApi, helloOk } = connection;
        const connectTimeoutMS = monitor.options.connectTimeoutMS;
        const maxAwaitTimeMS = monitor.options.heartbeatFrequencyMS;
        const cmd = {
            [serverApi?.version || helloOk ? 'hello' : constants_1.LEGACY_HELLO_COMMAND]: 1,
            ...(isAwaitable && topologyVersion
                ? { maxAwaitTimeMS, topologyVersion: makeTopologyVersion(topologyVersion) }
                : {})
        };
        const options = isAwaitable
            ? {
                socketTimeoutMS: connectTimeoutMS ? connectTimeoutMS + maxAwaitTimeMS : 0,
                exhaustAllowed: true
            }
            : { socketTimeoutMS: connectTimeoutMS };
        if (isAwaitable && monitor.rttPinger == null) {
            monitor.rttPinger = new RTTPinger(monitor);
        }
        // Record new start time before sending handshake
        start = (0, utils_1.now)();
        if (isAwaitable) {
            awaited = true;
            return connection.exhaustCommand((0, utils_1.ns)('admin.$cmd'), cmd, options, (error, hello) => {
                if (error)
                    return onHeartbeatFailed(error);
                return onHeartbeatSucceeded(hello);
            });
        }
        awaited = false;
        connection
            .command((0, utils_1.ns)('admin.$cmd'), cmd, options)
            .then(onHeartbeatSucceeded, onHeartbeatFailed);
        return;
    }
    // connecting does an implicit `hello`
    (async () => {
        const socket = await (0, connect_1.makeSocket)(monitor.connectOptions);
        const connection = (0, connect_1.makeConnection)(monitor.connectOptions, socket);
        // The start time is after socket creation but before the handshake
        start = (0, utils_1.now)();
        try {
            await (0, connect_1.performInitialHandshake)(connection, monitor.connectOptions);
            return connection;
        }
        catch (error) {
            connection.destroy();
            throw error;
        }
    })().then(connection => {
        if (isInCloseState(monitor)) {
            connection.destroy();
            return;
        }
        const duration = (0, utils_1.calculateDurationInMs)(start);
        monitor.addRttSample(duration);
        monitor.connection = connection;
        monitor.emitAndLogHeartbeat(server_1.Server.SERVER_HEARTBEAT_SUCCEEDED, monitor.server.topology.s.id, connection.hello?.connectionId, new events_1.ServerHeartbeatSucceededEvent(monitor.address, duration, connection.hello, useStreamingProtocol(monitor, connection.hello?.topologyVersion)));
        callback(undefined, connection.hello);
    }, error => {
        monitor.connection = null;
        awaited = false;
        onHeartbeatFailed(error);
    });
}
function monitorServer(monitor) {
    return (callback) => {
        if (monitor.s.state === STATE_MONITORING) {
            process.nextTick(callback);
            return;
        }
        stateTransition(monitor, STATE_MONITORING);
        function done() {
            if (!isInCloseState(monitor)) {
                stateTransition(monitor, STATE_IDLE);
            }
            callback();
        }
        checkServer(monitor, (err, hello) => {
            if (err) {
                // otherwise an error occurred on initial discovery, also bail
                if (monitor.server.description.type === common_1.ServerType.Unknown) {
                    return done();
                }
            }
            // if the check indicates streaming is supported, immediately reschedule monitoring
            if (useStreamingProtocol(monitor, hello?.topologyVersion)) {
                (0, timers_1.setTimeout)(() => {
                    if (!isInCloseState(monitor)) {
                        monitor.monitorId?.wake();
                    }
                }, 0);
            }
            done();
        });
    };
}
function makeTopologyVersion(tv) {
    return {
        processId: tv.processId,
        // tests mock counter as just number, but in a real situation counter should always be a Long
        // TODO(NODE-2674): Preserve int64 sent from MongoDB
        counter: bson_1.Long.isLong(tv.counter) ? tv.counter : bson_1.Long.fromNumber(tv.counter)
    };
}
/** @internal */
class RTTPinger {
    constructor(monitor) {
        this.connection = undefined;
        this.cancellationToken = monitor.cancellationToken;
        this.closed = false;
        this.monitor = monitor;
        this.latestRtt = monitor.latestRtt ?? undefined;
        const heartbeatFrequencyMS = monitor.options.heartbeatFrequencyMS;
        this.monitorId = (0, timers_1.setTimeout)(() => this.measureRoundTripTime(), heartbeatFrequencyMS);
    }
    get roundTripTime() {
        return this.monitor.roundTripTime;
    }
    get minRoundTripTime() {
        return this.monitor.minRoundTripTime;
    }
    close() {
        this.closed = true;
        (0, timers_1.clearTimeout)(this.monitorId);
        this.connection?.destroy();
        this.connection = undefined;
    }
    measureAndReschedule(start, conn) {
        if (this.closed) {
            conn?.destroy();
            return;
        }
        if (this.connection == null) {
            this.connection = conn;
        }
        this.latestRtt = (0, utils_1.calculateDurationInMs)(start);
        this.monitorId = (0, timers_1.setTimeout)(() => this.measureRoundTripTime(), this.monitor.options.heartbeatFrequencyMS);
    }
    measureRoundTripTime() {
        const start = (0, utils_1.now)();
        if (this.closed) {
            return;
        }
        const connection = this.connection;
        if (connection == null) {
            (0, connect_1.connect)(this.monitor.connectOptions).then(connection => {
                this.measureAndReschedule(start, connection);
            }, () => {
                this.connection = undefined;
            });
            return;
        }
        const commandName = connection.serverApi?.version || connection.helloOk ? 'hello' : constants_1.LEGACY_HELLO_COMMAND;
        connection.command((0, utils_1.ns)('admin.$cmd'), { [commandName]: 1 }, undefined).then(() => this.measureAndReschedule(start), () => {
            this.connection?.destroy();
            this.connection = undefined;
            return;
        });
    }
}
exports.RTTPinger = RTTPinger;
/**
 * @internal
 */
class MonitorInterval {
    constructor(fn, options = {}) {
        this.isExpeditedCallToFnScheduled = false;
        this.stopped = false;
        this.isExecutionInProgress = false;
        this.hasExecutedOnce = false;
        this._executeAndReschedule = () => {
            if (this.stopped)
                return;
            if (this.timerId) {
                (0, timers_1.clearTimeout)(this.timerId);
            }
            this.isExpeditedCallToFnScheduled = false;
            this.isExecutionInProgress = true;
            this.fn(() => {
                this.lastExecutionEnded = (0, utils_1.now)();
                this.isExecutionInProgress = false;
                this._reschedule(this.heartbeatFrequencyMS);
            });
        };
        this.fn = fn;
        this.lastExecutionEnded = -Infinity;
        this.heartbeatFrequencyMS = options.heartbeatFrequencyMS ?? 1000;
        this.minHeartbeatFrequencyMS = options.minHeartbeatFrequencyMS ?? 500;
        if (options.immediate) {
            this._executeAndReschedule();
        }
        else {
            this._reschedule(undefined);
        }
    }
    wake() {
        const currentTime = (0, utils_1.now)();
        const timeSinceLastCall = currentTime - this.lastExecutionEnded;
        // TODO(NODE-4674): Add error handling and logging to the monitor
        if (timeSinceLastCall < 0) {
            return this._executeAndReschedule();
        }
        if (this.isExecutionInProgress) {
            return;
        }
        // debounce multiple calls to wake within the `minInterval`
        if (this.isExpeditedCallToFnScheduled) {
            return;
        }
        // reschedule a call as soon as possible, ensuring the call never happens
        // faster than the `minInterval`
        if (timeSinceLastCall < this.minHeartbeatFrequencyMS) {
            this.isExpeditedCallToFnScheduled = true;
            this._reschedule(this.minHeartbeatFrequencyMS - timeSinceLastCall);
            return;
        }
        this._executeAndReschedule();
    }
    stop() {
        this.stopped = true;
        if (this.timerId) {
            (0, timers_1.clearTimeout)(this.timerId);
            this.timerId = undefined;
        }
        this.lastExecutionEnded = -Infinity;
        this.isExpeditedCallToFnScheduled = false;
    }
    toString() {
        return JSON.stringify(this);
    }
    toJSON() {
        const currentTime = (0, utils_1.now)();
        const timeSinceLastCall = currentTime - this.lastExecutionEnded;
        return {
            timerId: this.timerId != null ? 'set' : 'cleared',
            lastCallTime: this.lastExecutionEnded,
            isExpeditedCheckScheduled: this.isExpeditedCallToFnScheduled,
            stopped: this.stopped,
            heartbeatFrequencyMS: this.heartbeatFrequencyMS,
            minHeartbeatFrequencyMS: this.minHeartbeatFrequencyMS,
            currentTime,
            timeSinceLastCall
        };
    }
    _reschedule(ms) {
        if (this.stopped)
            return;
        if (this.timerId) {
            (0, timers_1.clearTimeout)(this.timerId);
        }
        this.timerId = (0, timers_1.setTimeout)(this._executeAndReschedule, ms || this.heartbeatFrequencyMS);
    }
}
exports.MonitorInterval = MonitorInterval;
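// Illustrative usage sketch (not part of the vendored file): MonitorInterval drives a
// completion-callback task on a fixed cadence; wake() expedites the next run but never
// runs faster than minHeartbeatFrequencyMS:
//
//   const interval = new MonitorInterval(done => {
//       console.log('check at', Date.now());
//       done(); // signal completion; the next run is scheduled heartbeatFrequencyMS later
//   }, { heartbeatFrequencyMS: 2000, minHeartbeatFrequencyMS: 500, immediate: true });
//   interval.wake(); // debounced; an expedited run still waits out the 500ms floor
//   // later: interval.stop();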
/** @internal
 * This class implements the RTT sampling logic specified for [CSOT](https://github.com/mongodb/specifications/blob/bbb335e60cd7ea1e0f7cd9a9443cb95fc9d3b64d/source/client-side-operations-timeout/client-side-operations-timeout.md#drivers-use-minimum-rtt-to-short-circuit-operations)
 *
 * This is implemented as a [circular buffer](https://en.wikipedia.org/wiki/Circular_buffer) keeping
 * the most recent `windowSize` samples
 * */
class RTTSampler {
    constructor(windowSize = 10) {
        this.rttSamples = new Float64Array(windowSize);
        this.length = 0;
        this.writeIndex = 0;
    }
    /**
     * Adds an rtt sample to the end of the circular buffer
     * When `windowSize` samples have been collected, `addSample` overwrites the least recently added
     * sample
     */
    addSample(sample) {
        this.rttSamples[this.writeIndex++] = sample;
        if (this.length < this.rttSamples.length) {
            this.length++;
        }
        this.writeIndex %= this.rttSamples.length;
    }
    /**
     * When \< 2 samples have been collected, returns 0
     * Otherwise computes the minimum value samples contained in the buffer
     */
    min() {
        if (this.length < 2)
            return 0;
        let min = this.rttSamples[0];
        for (let i = 1; i < this.length; i++) {
            if (this.rttSamples[i] < min)
                min = this.rttSamples[i];
        }
        return min;
    }
    /**
     * Returns mean of samples contained in the buffer
     */
    average() {
        if (this.length === 0)
            return 0;
        let sum = 0;
        for (let i = 0; i < this.length; i++) {
            sum += this.rttSamples[i];
        }
        return sum / this.length;
    }
    /**
     * Returns most recently inserted element in the buffer
     * Returns null if the buffer is empty
     * */
    get last() {
        if (this.length === 0)
            return null;
        return this.rttSamples[this.writeIndex === 0 ? this.length - 1 : this.writeIndex - 1];
    }
    /**
     * Clear the buffer
     * NOTE: this does not overwrite the data held in the internal array, just the pointers into
     * this array
     */
    clear() {
        this.length = 0;
        this.writeIndex = 0;
    }
}
exports.RTTSampler = RTTSampler;
//# sourceMappingURL=monitor.js.map
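// Illustrative usage sketch (not part of the vendored file): RTTSampler is
// self-contained, so its window semantics can be exercised directly:
//
//   const sampler = new RTTSampler(3);        // window of 3 samples
//   [5, 9, 7, 11].forEach(ms => sampler.addSample(ms));
//   sampler.last;      // 11, the most recent sample
//   sampler.min();     // 7, since the oldest sample (5) was overwritten
//   sampler.average(); // 9, the mean of [9, 7, 11]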
node_modules/mongodb/lib/sdam/monitor.js.map (generated, vendored, Normal file, 1 line added)
File diff suppressed because one or more lines are too long
node_modules/mongodb/lib/sdam/server.js (generated, vendored, Normal file, 392 lines added)
@@ -0,0 +1,392 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Server = void 0;
const connection_1 = require("../cmap/connection");
const connection_pool_1 = require("../cmap/connection_pool");
const errors_1 = require("../cmap/errors");
const constants_1 = require("../constants");
const error_1 = require("../error");
const mongo_types_1 = require("../mongo_types");
const aggregate_1 = require("../operations/aggregate");
const transactions_1 = require("../transactions");
const utils_1 = require("../utils");
const write_concern_1 = require("../write_concern");
const common_1 = require("./common");
const monitor_1 = require("./monitor");
const server_description_1 = require("./server_description");
const server_selection_1 = require("./server_selection");
const stateTransition = (0, utils_1.makeStateMachine)({
    [common_1.STATE_CLOSED]: [common_1.STATE_CLOSED, common_1.STATE_CONNECTING],
    [common_1.STATE_CONNECTING]: [common_1.STATE_CONNECTING, common_1.STATE_CLOSING, common_1.STATE_CONNECTED, common_1.STATE_CLOSED],
    [common_1.STATE_CONNECTED]: [common_1.STATE_CONNECTED, common_1.STATE_CLOSING, common_1.STATE_CLOSED],
    [common_1.STATE_CLOSING]: [common_1.STATE_CLOSING, common_1.STATE_CLOSED]
});
/** @internal */
class Server extends mongo_types_1.TypedEventEmitter {
    /** @event */
    static { this.SERVER_HEARTBEAT_STARTED = constants_1.SERVER_HEARTBEAT_STARTED; }
    /** @event */
    static { this.SERVER_HEARTBEAT_SUCCEEDED = constants_1.SERVER_HEARTBEAT_SUCCEEDED; }
    /** @event */
    static { this.SERVER_HEARTBEAT_FAILED = constants_1.SERVER_HEARTBEAT_FAILED; }
    /** @event */
    static { this.CONNECT = constants_1.CONNECT; }
    /** @event */
    static { this.DESCRIPTION_RECEIVED = constants_1.DESCRIPTION_RECEIVED; }
    /** @event */
    static { this.CLOSED = constants_1.CLOSED; }
    /** @event */
    static { this.ENDED = constants_1.ENDED; }
    /**
     * Create a server
     */
    constructor(topology, description, options) {
        super();
        this.on('error', utils_1.noop);
        this.serverApi = options.serverApi;
        const poolOptions = { hostAddress: description.hostAddress, ...options };
        this.topology = topology;
        this.pool = new connection_pool_1.ConnectionPool(this, poolOptions);
        this.s = {
            description,
            options,
            state: common_1.STATE_CLOSED,
            operationCount: 0
        };
        for (const event of [...constants_1.CMAP_EVENTS, ...constants_1.APM_EVENTS]) {
            this.pool.on(event, (e) => this.emit(event, e));
        }
        this.pool.on(connection_1.Connection.CLUSTER_TIME_RECEIVED, (clusterTime) => {
            this.clusterTime = clusterTime;
        });
        if (this.loadBalanced) {
            this.monitor = null;
            // monitoring is disabled in load balancing mode
            return;
        }
        // create the monitor
        this.monitor = new monitor_1.Monitor(this, this.s.options);
        for (const event of constants_1.HEARTBEAT_EVENTS) {
            this.monitor.on(event, (e) => this.emit(event, e));
        }
        this.monitor.on('resetServer', (error) => markServerUnknown(this, error));
        this.monitor.on(Server.SERVER_HEARTBEAT_SUCCEEDED, (event) => {
            this.emit(Server.DESCRIPTION_RECEIVED, new server_description_1.ServerDescription(this.description.hostAddress, event.reply, {
                roundTripTime: this.monitor?.roundTripTime,
                minRoundTripTime: this.monitor?.minRoundTripTime
            }));
            if (this.s.state === common_1.STATE_CONNECTING) {
                stateTransition(this, common_1.STATE_CONNECTED);
                this.emit(Server.CONNECT, this);
            }
        });
    }
    get clusterTime() {
        return this.topology.clusterTime;
    }
    set clusterTime(clusterTime) {
        this.topology.clusterTime = clusterTime;
    }
    get description() {
        return this.s.description;
    }
    get name() {
        return this.s.description.address;
    }
    get autoEncrypter() {
        if (this.s.options && this.s.options.autoEncrypter) {
            return this.s.options.autoEncrypter;
        }
        return;
    }
    get loadBalanced() {
        return this.topology.description.type === common_1.TopologyType.LoadBalanced;
    }
    /**
     * Initiate server connect
     */
    connect() {
        if (this.s.state !== common_1.STATE_CLOSED) {
            return;
        }
        stateTransition(this, common_1.STATE_CONNECTING);
        // If in load balancer mode we automatically set the server to
        // a load balancer. It never transitions out of this state and
        // has no monitor.
        if (!this.loadBalanced) {
            this.monitor?.connect();
        }
        else {
            stateTransition(this, common_1.STATE_CONNECTED);
            this.emit(Server.CONNECT, this);
        }
    }
    closeCheckedOutConnections() {
        return this.pool.closeCheckedOutConnections();
    }
    /** Destroy the server connection */
    close() {
        if (this.s.state === common_1.STATE_CLOSED) {
            return;
        }
        stateTransition(this, common_1.STATE_CLOSING);
        if (!this.loadBalanced) {
            this.monitor?.close();
        }
        this.pool.close();
        stateTransition(this, common_1.STATE_CLOSED);
        this.emit('closed');
    }
    /**
     * Immediately schedule monitoring of this server. If there is already an attempt
     * being made this will be a no-op.
     */
    requestCheck() {
        if (!this.loadBalanced) {
            this.monitor?.requestCheck();
        }
    }
    async command(operation, timeoutContext) {
        if (this.s.state === common_1.STATE_CLOSING || this.s.state === common_1.STATE_CLOSED) {
            throw new error_1.MongoServerClosedError();
        }
        const session = operation.session;
        let conn = session?.pinnedConnection;
        this.incrementOperationCount();
        if (conn == null) {
            try {
                conn = await this.pool.checkOut({ timeoutContext, signal: operation.options.signal });
            }
            catch (checkoutError) {
                this.decrementOperationCount();
                if (!(checkoutError instanceof errors_1.PoolClearedError))
                    this.handleError(checkoutError);
                throw checkoutError;
            }
        }
        let reauthPromise = null;
        const cleanup = () => {
            this.decrementOperationCount();
            if (session?.pinnedConnection !== conn) {
                if (reauthPromise != null) {
                    // The reauth promise only exists if it hasn't thrown.
                    const checkBackIn = () => {
                        this.pool.checkIn(conn);
                    };
                    void reauthPromise.then(checkBackIn, checkBackIn);
                }
                else {
                    this.pool.checkIn(conn);
                }
            }
        };
        let cmd;
        try {
            cmd = operation.buildCommand(conn, session);
        }
        catch (e) {
            cleanup();
            throw e;
        }
        const options = operation.buildOptions(timeoutContext);
        const ns = operation.ns;
        if (this.loadBalanced && isPinnableCommand(cmd, session) && !session?.pinnedConnection) {
            session?.pin(conn);
        }
        options.directConnection = this.topology.s.options.directConnection;
        const omitReadPreference = operation instanceof aggregate_1.AggregateOperation &&
            operation.hasWriteStage &&
            (0, utils_1.maxWireVersion)(conn) < server_selection_1.MIN_SECONDARY_WRITE_WIRE_VERSION;
        if (omitReadPreference) {
            delete options.readPreference;
        }
        if (this.description.iscryptd) {
            options.omitMaxTimeMS = true;
        }
        try {
            try {
                const res = await conn.command(ns, cmd, options, operation.SERVER_COMMAND_RESPONSE_TYPE);
                (0, write_concern_1.throwIfWriteConcernError)(res);
                return res;
            }
            catch (commandError) {
                throw this.decorateCommandError(conn, cmd, options, commandError);
            }
        }
        catch (operationError) {
            if (operationError instanceof error_1.MongoError &&
                operationError.code === error_1.MONGODB_ERROR_CODES.Reauthenticate) {
                reauthPromise = this.pool.reauthenticate(conn);
                reauthPromise.then(undefined, error => {
                    reauthPromise = null;
                    (0, utils_1.squashError)(error);
                });
                await (0, utils_1.abortable)(reauthPromise, options);
                reauthPromise = null; // only reachable if reauth succeeds
                try {
                    const res = await conn.command(ns, cmd, options, operation.SERVER_COMMAND_RESPONSE_TYPE);
                    (0, write_concern_1.throwIfWriteConcernError)(res);
                    return res;
                }
                catch (commandError) {
                    throw this.decorateCommandError(conn, cmd, options, commandError);
                }
            }
            else {
                throw operationError;
            }
        }
        finally {
            cleanup();
        }
    }
    /**
     * Handle SDAM error
     * @internal
     */
    handleError(error, connection) {
        if (!(error instanceof error_1.MongoError)) {
            return;
        }
        const isStaleError = error.connectionGeneration && error.connectionGeneration < this.pool.generation;
        if (isStaleError) {
            return;
        }
        const isNetworkNonTimeoutError = error instanceof error_1.MongoNetworkError && !(error instanceof error_1.MongoNetworkTimeoutError);
        const isNetworkTimeoutBeforeHandshakeError = error instanceof error_1.MongoNetworkError && error.beforeHandshake;
        const isAuthHandshakeError = error.hasErrorLabel(error_1.MongoErrorLabel.HandshakeError);
        if (isNetworkNonTimeoutError || isNetworkTimeoutBeforeHandshakeError || isAuthHandshakeError) {
            // In load balanced mode we never mark the server as unknown and always
            // clear for the specific service id.
            if (!this.loadBalanced) {
                error.addErrorLabel(error_1.MongoErrorLabel.ResetPool);
                markServerUnknown(this, error);
            }
            else if (connection) {
                this.pool.clear({ serviceId: connection.serviceId });
            }
        }
        else {
            if ((0, error_1.isSDAMUnrecoverableError)(error)) {
                if (shouldHandleStateChangeError(this, error)) {
                    const shouldClearPool = (0, error_1.isNodeShuttingDownError)(error);
                    if (this.loadBalanced && connection && shouldClearPool) {
                        this.pool.clear({ serviceId: connection.serviceId });
                    }
                    if (!this.loadBalanced) {
                        if (shouldClearPool) {
                            error.addErrorLabel(error_1.MongoErrorLabel.ResetPool);
                        }
                        markServerUnknown(this, error);
                        process.nextTick(() => this.requestCheck());
                    }
                }
            }
        }
    }
    /**
     * Ensure that error is properly decorated and internal state is updated before throwing
     * @internal
     */
    decorateCommandError(connection, cmd, options, error) {
        if (typeof error !== 'object' || error == null || !('name' in error)) {
            throw new error_1.MongoRuntimeError('An unexpected error type: ' + typeof error);
        }
        if (error.name === 'AbortError' && 'cause' in error && error.cause instanceof error_1.MongoError) {
            error = error.cause;
        }
        if (!(error instanceof error_1.MongoError)) {
            // Node.js or some other error we have not special handling for
            return error;
        }
        if (connectionIsStale(this.pool, connection)) {
            return error;
        }
        const session = options?.session;
        if (error instanceof error_1.MongoNetworkError) {
            if (session && !session.hasEnded && session.serverSession) {
                session.serverSession.isDirty = true;
            }
            // inActiveTransaction check handles commit and abort.
            if (inActiveTransaction(session, cmd) &&
                !error.hasErrorLabel(error_1.MongoErrorLabel.TransientTransactionError)) {
                error.addErrorLabel(error_1.MongoErrorLabel.TransientTransactionError);
            }
            if ((isRetryableWritesEnabled(this.topology) || (0, transactions_1.isTransactionCommand)(cmd)) &&
                (0, utils_1.supportsRetryableWrites)(this) &&
                !inActiveTransaction(session, cmd)) {
                error.addErrorLabel(error_1.MongoErrorLabel.RetryableWriteError);
            }
        }
        else {
            if ((isRetryableWritesEnabled(this.topology) || (0, transactions_1.isTransactionCommand)(cmd)) &&
                (0, error_1.needsRetryableWriteLabel)(error, (0, utils_1.maxWireVersion)(this), this.description.type) &&
                !inActiveTransaction(session, cmd)) {
                error.addErrorLabel(error_1.MongoErrorLabel.RetryableWriteError);
            }
        }
        if (session &&
            session.isPinned &&
            error.hasErrorLabel(error_1.MongoErrorLabel.TransientTransactionError)) {
            session.unpin({ force: true });
        }
        this.handleError(error, connection);
        return error;
    }
    /**
     * Decrement the operation count, returning the new count.
     */
    decrementOperationCount() {
        return (this.s.operationCount -= 1);
    }
    /**
     * Increment the operation count, returning the new count.
     */
    incrementOperationCount() {
        return (this.s.operationCount += 1);
    }
}
exports.Server = Server;
function markServerUnknown(server, error) {
    // Load balancer servers can never be marked unknown.
    if (server.loadBalanced) {
        return;
    }
    if (error instanceof error_1.MongoNetworkError && !(error instanceof error_1.MongoNetworkTimeoutError)) {
        server.monitor?.reset();
    }
    server.emit(Server.DESCRIPTION_RECEIVED, new server_description_1.ServerDescription(server.description.hostAddress, undefined, { error }));
}
function isPinnableCommand(cmd, session) {
    if (session) {
        return (session.inTransaction() ||
            (session.transaction.isCommitted && 'commitTransaction' in cmd) ||
            'aggregate' in cmd ||
            'find' in cmd ||
            'getMore' in cmd ||
            'listCollections' in cmd ||
            'listIndexes' in cmd ||
            'bulkWrite' in cmd);
    }
    return false;
}
function connectionIsStale(pool, connection) {
    if (connection.serviceId) {
        return (connection.generation !== pool.serviceGenerations.get(connection.serviceId.toHexString()));
    }
    return connection.generation !== pool.generation;
}
function shouldHandleStateChangeError(server, err) {
    const etv = err.topologyVersion;
    const stv = server.description.topologyVersion;
    return (0, server_description_1.compareTopologyVersion)(stv, etv) < 0;
}
function inActiveTransaction(session, cmd) {
    return session && session.inTransaction() && !(0, transactions_1.isTransactionCommand)(cmd);
}
/** this checks the retryWrites option passed down from the client options, it
 * does not check if the server supports retryable writes */
function isRetryableWritesEnabled(topology) {
    return topology.s.options.retryWrites !== false;
}
//# sourceMappingURL=server.js.map
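// Illustrative sketch (not part of the vendored file): in load-balanced mode, cursor-
// and transaction-related commands pin the session to its checked-out connection. The
// session object below is a hypothetical stand-in for a driver ClientSession:
//
//   const session = { inTransaction: () => false, transaction: { isCommitted: false } };
//   isPinnableCommand({ find: 'users' }, session);   // true: cursors must stay
//   isPinnableCommand({ getMore: 1n }, session);     // true: on one connection
//   isPinnableCommand({ insert: 'users' }, session); // false: no pinning needed
//   isPinnableCommand({ find: 'users' }, undefined); // false: no session, no pin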
node_modules/mongodb/lib/sdam/server.js.map (generated, vendored, Normal file, 1 line added)
File diff suppressed because one or more lines are too long
node_modules/mongodb/lib/sdam/server_description.js (generated, vendored, Normal file, 204 lines added)
|
|
@ -0,0 +1,204 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ServerDescription = void 0;
|
||||
exports.parseServerType = parseServerType;
|
||||
exports.compareTopologyVersion = compareTopologyVersion;
|
||||
const bson_1 = require("../bson");
|
||||
const error_1 = require("../error");
|
||||
const utils_1 = require("../utils");
|
||||
const common_1 = require("./common");
|
||||
const WRITABLE_SERVER_TYPES = new Set([
|
||||
common_1.ServerType.RSPrimary,
|
||||
common_1.ServerType.Standalone,
|
||||
common_1.ServerType.Mongos,
|
||||
common_1.ServerType.LoadBalancer
|
||||
]);
|
||||
const DATA_BEARING_SERVER_TYPES = new Set([
|
||||
common_1.ServerType.RSPrimary,
|
||||
common_1.ServerType.RSSecondary,
|
||||
common_1.ServerType.Mongos,
|
||||
common_1.ServerType.Standalone,
|
||||
common_1.ServerType.LoadBalancer
|
||||
]);
|
||||
/**
|
||||
* The client's view of a single server, based on the most recent hello outcome.
|
||||
*
|
||||
* Internal type, not meant to be directly instantiated
|
||||
* @public
|
||||
*/
|
||||
class ServerDescription {
|
||||
/**
|
||||
* Create a ServerDescription
|
||||
* @internal
|
||||
*
|
||||
* @param address - The address of the server
|
||||
* @param hello - An optional hello response for this server
|
||||
*/
|
||||
constructor(address, hello, options = {}) {
|
||||
if (address == null || address === '') {
|
||||
throw new error_1.MongoRuntimeError('ServerDescription must be provided with a non-empty address');
|
||||
}
|
||||
this.address =
|
||||
typeof address === 'string'
|
||||
? utils_1.HostAddress.fromString(address).toString() // Use HostAddress to normalize
|
||||
: address.toString();
|
||||
this.type = parseServerType(hello, options);
|
||||
this.hosts = hello?.hosts?.map((host) => host.toLowerCase()) ?? [];
|
||||
this.passives = hello?.passives?.map((host) => host.toLowerCase()) ?? [];
|
||||
this.arbiters = hello?.arbiters?.map((host) => host.toLowerCase()) ?? [];
|
||||
this.tags = hello?.tags ?? {};
|
||||
this.minWireVersion = hello?.minWireVersion ?? 0;
|
||||
this.maxWireVersion = hello?.maxWireVersion ?? 0;
|
||||
this.roundTripTime = options?.roundTripTime ?? -1;
|
||||
this.minRoundTripTime = options?.minRoundTripTime ?? 0;
|
||||
this.lastUpdateTime = (0, utils_1.now)();
|
||||
this.lastWriteDate = hello?.lastWrite?.lastWriteDate ?? 0;
|
||||
// NOTE: This actually builds the stack string instead of holding onto the getter and all its
|
||||
// associated references. This is done to prevent a memory leak.
|
||||
this.error = options.error ?? null;
|
||||
this.error?.stack;
|
||||
// TODO(NODE-2674): Preserve int64 sent from MongoDB
|
||||
this.topologyVersion = this.error?.topologyVersion ?? hello?.topologyVersion ?? null;
|
||||
this.setName = hello?.setName ?? null;
|
||||
this.setVersion = hello?.setVersion ?? null;
|
||||
this.electionId = hello?.electionId ?? null;
|
||||
this.logicalSessionTimeoutMinutes = hello?.logicalSessionTimeoutMinutes ?? null;
|
||||
this.maxMessageSizeBytes = hello?.maxMessageSizeBytes ?? null;
|
||||
this.maxWriteBatchSize = hello?.maxWriteBatchSize ?? null;
|
||||
this.maxBsonObjectSize = hello?.maxBsonObjectSize ?? null;
|
||||
this.primary = hello?.primary ?? null;
|
||||
this.me = hello?.me?.toLowerCase() ?? null;
|
||||
this.$clusterTime = hello?.$clusterTime ?? null;
|
||||
this.iscryptd = Boolean(hello?.iscryptd);
|
||||
}
|
||||
get hostAddress() {
|
||||
return utils_1.HostAddress.fromString(this.address);
|
||||
}
|
||||
get allHosts() {
|
||||
return this.hosts.concat(this.arbiters).concat(this.passives);
|
||||
}
|
||||
/** Is this server available for reads*/
|
||||
get isReadable() {
|
||||
return this.type === common_1.ServerType.RSSecondary || this.isWritable;
|
||||
}
|
||||
/** Is this server data bearing */
|
||||
get isDataBearing() {
|
||||
return DATA_BEARING_SERVER_TYPES.has(this.type);
|
||||
}
|
||||
/** Is this server available for writes */
|
||||
get isWritable() {
|
||||
return WRITABLE_SERVER_TYPES.has(this.type);
|
||||
}
|
||||
get host() {
|
||||
const chopLength = `:${this.port}`.length;
|
||||
return this.address.slice(0, -chopLength);
|
||||
}
|
||||
get port() {
|
||||
const port = this.address.split(':').pop();
|
||||
return port ? Number.parseInt(port, 10) : 27017;
|
||||
}
|
||||
/**
|
||||
* Determines if another `ServerDescription` is equal to this one per the rules defined in the SDAM specification.
|
||||
* @see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.md
|
||||
*/
|
||||
equals(other) {
|
||||
// Despite using the comparator that would determine a nullish topologyVersion as greater than
|
||||
// for equality we should only always perform direct equality comparison
|
||||
const topologyVersionsEqual = this.topologyVersion === other?.topologyVersion ||
|
||||
compareTopologyVersion(this.topologyVersion, other?.topologyVersion) === 0;
|
||||
const electionIdsEqual = this.electionId != null && other?.electionId != null
|
||||
? (0, utils_1.compareObjectId)(this.electionId, other.electionId) === 0
|
||||
: this.electionId === other?.electionId;
|
||||
return (other != null &&
|
||||
other.iscryptd === this.iscryptd &&
|
||||
(0, utils_1.errorStrictEqual)(this.error, other.error) &&
|
||||
this.type === other.type &&
|
||||
this.minWireVersion === other.minWireVersion &&
|
||||
(0, utils_1.arrayStrictEqual)(this.hosts, other.hosts) &&
|
||||
tagsStrictEqual(this.tags, other.tags) &&
|
||||
this.setName === other.setName &&
|
||||
this.setVersion === other.setVersion &&
|
||||
electionIdsEqual &&
|
||||
this.primary === other.primary &&
|
||||
this.logicalSessionTimeoutMinutes === other.logicalSessionTimeoutMinutes &&
|
||||
topologyVersionsEqual);
|
||||
}
|
||||
}
|
||||
exports.ServerDescription = ServerDescription;
|
||||
// Parses a `hello` message and determines the server type
|
||||
function parseServerType(hello, options) {
|
||||
if (options?.loadBalanced) {
|
||||
return common_1.ServerType.LoadBalancer;
|
||||
}
|
||||
if (!hello || !hello.ok) {
|
||||
return common_1.ServerType.Unknown;
|
||||
}
|
||||
if (hello.isreplicaset) {
|
||||
return common_1.ServerType.RSGhost;
|
||||
}
|
||||
if (hello.msg && hello.msg === 'isdbgrid') {
|
||||
return common_1.ServerType.Mongos;
|
||||
}
|
||||
if (hello.setName) {
|
||||
if (hello.hidden) {
|
||||
return common_1.ServerType.RSOther;
|
||||
}
|
||||
else if (hello.isWritablePrimary) {
|
||||
return common_1.ServerType.RSPrimary;
|
||||
}
|
||||
else if (hello.secondary) {
|
||||
return common_1.ServerType.RSSecondary;
|
||||
}
|
||||
else if (hello.arbiterOnly) {
|
||||
return common_1.ServerType.RSArbiter;
|
||||
}
|
||||
else {
|
||||
return common_1.ServerType.RSOther;
|
||||
}
|
||||
}
|
||||
return common_1.ServerType.Standalone;
|
||||
}
|
||||
function tagsStrictEqual(tags, tags2) {
|
||||
const tagsKeys = Object.keys(tags);
|
||||
const tags2Keys = Object.keys(tags2);
|
||||
return (tagsKeys.length === tags2Keys.length &&
|
||||
tagsKeys.every((key) => tags2[key] === tags[key]));
|
||||
}
|
||||
/**
|
||||
* Compares two topology versions.
|
||||
*
|
||||
* 1. If the response topologyVersion is unset or the ServerDescription's
|
||||
* topologyVersion is null, the client MUST assume the response is more recent.
|
||||
* 1. If the response's topologyVersion.processId is not equal to the
|
||||
* ServerDescription's, the client MUST assume the response is more recent.
|
||||
* 1. If the response's topologyVersion.processId is equal to the
|
||||
* ServerDescription's, the client MUST use the counter field to determine
|
||||
* which topologyVersion is more recent.
|
||||
*
|
||||
* ```ts
|
||||
* currentTv < newTv === -1
|
||||
* currentTv === newTv === 0
|
||||
* currentTv > newTv === 1
|
||||
* ```
|
||||
*/
function compareTopologyVersion(currentTv, newTv) {
    if (currentTv == null || newTv == null) {
        return -1;
    }
    if (!currentTv.processId.equals(newTv.processId)) {
        return -1;
    }
    // TODO(NODE-2674): Preserve int64 sent from MongoDB
    const currentCounter = typeof currentTv.counter === 'bigint'
        ? bson_1.Long.fromBigInt(currentTv.counter)
        : bson_1.Long.isLong(currentTv.counter)
            ? currentTv.counter
            : bson_1.Long.fromNumber(currentTv.counter);
    const newCounter = typeof newTv.counter === 'bigint'
        ? bson_1.Long.fromBigInt(newTv.counter)
        : bson_1.Long.isLong(newTv.counter)
            ? newTv.counter
            : bson_1.Long.fromNumber(newTv.counter);
    return currentCounter.compare(newCounter);
}
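// Illustrative sketch (not part of the driver): with the same processId the
// counters decide recency; with a different (or missing) processId the new
// version always wins (returns -1), per the rules in the doc comment above.
//
//   const pid = new bson_1.ObjectId();
//   compareTopologyVersion({ processId: pid, counter: 1 },
//                          { processId: pid, counter: 2 });      // -1 (new is more recent)
//   compareTopologyVersion({ processId: pid, counter: 2 },
//                          { processId: pid, counter: 2 });      //  0
//   compareTopologyVersion(null, { processId: pid, counter: 0 }); // -1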
//# sourceMappingURL=server_description.js.map
1
node_modules/mongodb/lib/sdam/server_description.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/sdam/server_description.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
231
node_modules/mongodb/lib/sdam/server_selection.js
generated
vendored
Normal file
231
node_modules/mongodb/lib/sdam/server_selection.js
generated
vendored
Normal file
|
|
@ -0,0 +1,231 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.MIN_SECONDARY_WRITE_WIRE_VERSION = void 0;
|
||||
exports.writableServerSelector = writableServerSelector;
|
||||
exports.sameServerSelector = sameServerSelector;
|
||||
exports.secondaryWritableServerSelector = secondaryWritableServerSelector;
|
||||
exports.readPreferenceServerSelector = readPreferenceServerSelector;
|
||||
const error_1 = require("../error");
|
||||
const read_preference_1 = require("../read_preference");
|
||||
const common_1 = require("./common");
|
||||
// max staleness constants
|
||||
const IDLE_WRITE_PERIOD = 10000;
|
||||
const SMALLEST_MAX_STALENESS_SECONDS = 90;
|
||||
// Minimum version to try writes on secondaries.
|
||||
exports.MIN_SECONDARY_WRITE_WIRE_VERSION = 13;
|
||||
/**
|
||||
* Returns a server selector that selects for writable servers
|
||||
*/
|
||||
function writableServerSelector() {
|
||||
return function writableServer(topologyDescription, servers) {
|
||||
return latencyWindowReducer(topologyDescription, servers.filter((s) => s.isWritable));
|
||||
};
|
||||
}
/**
 * The purpose of this selector is to select the same server, only
 * if it is in a state that it can have commands sent to it.
 */
function sameServerSelector(description) {
    return function sameServerSelector(topologyDescription, servers) {
        if (!description)
            return [];
        // Filter the servers to match the provided description only if
        // the type is not unknown.
        return servers.filter(sd => {
            return sd.address === description.address && sd.type !== common_1.ServerType.Unknown;
        });
    };
}
/**
 * Returns a server selector that uses a read preference to select a
 * server potentially for a write on a secondary.
 */
function secondaryWritableServerSelector(wireVersion, readPreference) {
    // If server version < 5.0, the read preference is always primary.
    // If server version >= 5.0...
    // - If a read preference is supplied, use that.
    // - If no read preference is supplied, use primary.
    if (!readPreference ||
        !wireVersion ||
        (wireVersion && wireVersion < exports.MIN_SECONDARY_WRITE_WIRE_VERSION)) {
        return readPreferenceServerSelector(read_preference_1.ReadPreference.primary);
    }
    return readPreferenceServerSelector(readPreference);
}
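// Illustrative sketch (not part of the driver): wire version 13 corresponds to
// MongoDB 5.0, matching the version gate described in the comment above.
//
//   secondaryWritableServerSelector(12, ReadPreference.secondary); // falls back to primary
//   secondaryWritableServerSelector(13, ReadPreference.secondary); // honors the secondary preference
//   secondaryWritableServerSelector(13);                           // no preference supplied -> primary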
/**
 * Reduces the passed in array of servers by the rules of the "Max Staleness" specification
 * found here:
 *
 * @see https://github.com/mongodb/specifications/blob/master/source/max-staleness/max-staleness.md
 *
 * @param readPreference - The read preference providing max staleness guidance
 * @param topologyDescription - The topology description
 * @param servers - The list of server descriptions to be reduced
 * @returns The list of servers that satisfy the requirements of max staleness
 */
function maxStalenessReducer(readPreference, topologyDescription, servers) {
    if (readPreference.maxStalenessSeconds == null || readPreference.maxStalenessSeconds < 0) {
        return servers;
    }
    const maxStaleness = readPreference.maxStalenessSeconds;
    const maxStalenessVariance = (topologyDescription.heartbeatFrequencyMS + IDLE_WRITE_PERIOD) / 1000;
    if (maxStaleness < maxStalenessVariance) {
        throw new error_1.MongoInvalidArgumentError(`Option "maxStalenessSeconds" must be at least ${maxStalenessVariance} seconds`);
    }
    if (maxStaleness < SMALLEST_MAX_STALENESS_SECONDS) {
        throw new error_1.MongoInvalidArgumentError(`Option "maxStalenessSeconds" must be at least ${SMALLEST_MAX_STALENESS_SECONDS} seconds`);
    }
    if (topologyDescription.type === common_1.TopologyType.ReplicaSetWithPrimary) {
        const primary = Array.from(topologyDescription.servers.values()).filter(primaryFilter)[0];
        return servers.reduce((result, server) => {
            const stalenessMS = server.lastUpdateTime -
                server.lastWriteDate -
                (primary.lastUpdateTime - primary.lastWriteDate) +
                topologyDescription.heartbeatFrequencyMS;
            const staleness = stalenessMS / 1000;
            const maxStalenessSeconds = readPreference.maxStalenessSeconds ?? 0;
            if (staleness <= maxStalenessSeconds) {
                result.push(server);
            }
            return result;
        }, []);
    }
    if (topologyDescription.type === common_1.TopologyType.ReplicaSetNoPrimary) {
        if (servers.length === 0) {
            return servers;
        }
        const sMax = servers.reduce((max, s) => (s.lastWriteDate > max.lastWriteDate ? s : max));
        return servers.reduce((result, server) => {
            const stalenessMS = sMax.lastWriteDate - server.lastWriteDate + topologyDescription.heartbeatFrequencyMS;
            const staleness = stalenessMS / 1000;
            const maxStalenessSeconds = readPreference.maxStalenessSeconds ?? 0;
            if (staleness <= maxStalenessSeconds) {
                result.push(server);
            }
            return result;
        }, []);
    }
    return servers;
}
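// Illustrative worked example (not part of the driver), for the
// ReplicaSetWithPrimary branch above: with heartbeatFrequencyMS = 10000, a
// secondary whose (lastUpdateTime - lastWriteDate) lag exceeds the primary's
// by 25000 ms has staleness = (25000 + 10000) / 1000 = 35 s, which passes the
// minimum allowed maxStalenessSeconds of 90.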
/**
 * Determines whether a server's tags match a given set of tags
 *
 * @param tagSet - The requested tag set to match
 * @param serverTags - The server's tags
 */
function tagSetMatch(tagSet, serverTags) {
    const keys = Object.keys(tagSet);
    const serverTagKeys = Object.keys(serverTags);
    for (let i = 0; i < keys.length; ++i) {
        const key = keys[i];
        if (serverTagKeys.indexOf(key) === -1 || serverTags[key] !== tagSet[key]) {
            return false;
        }
    }
    return true;
}
/**
 * Reduces a set of server descriptions based on tags requested by the read preference
 *
 * @param readPreference - The read preference providing the requested tags
 * @param servers - The list of server descriptions to reduce
 * @returns The list of servers matching the requested tags
 */
function tagSetReducer(readPreference, servers) {
    if (readPreference.tags == null ||
        (Array.isArray(readPreference.tags) && readPreference.tags.length === 0)) {
        return servers;
    }
    for (let i = 0; i < readPreference.tags.length; ++i) {
        const tagSet = readPreference.tags[i];
        const serversMatchingTagset = servers.reduce((matched, server) => {
            if (tagSetMatch(tagSet, server.tags))
                matched.push(server);
            return matched;
        }, []);
        if (serversMatchingTagset.length) {
            return serversMatchingTagset;
        }
    }
    return [];
}
/**
 * Reduces a list of servers to ensure they fall within an acceptable latency window. This is
 * further specified in the "Server Selection" specification, found here:
 *
 * @see https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.md
 *
 * @param topologyDescription - The topology description
 * @param servers - The list of servers to reduce
 * @returns The servers which fall within an acceptable latency window
 */
function latencyWindowReducer(topologyDescription, servers) {
    const low = servers.reduce((min, server) => Math.min(server.roundTripTime, min), Infinity);
    const high = low + topologyDescription.localThresholdMS;
    return servers.reduce((result, server) => {
        if (server.roundTripTime <= high && server.roundTripTime >= low)
            result.push(server);
        return result;
    }, []);
}
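// Illustrative worked example (not part of the driver): with the default
// localThresholdMS of 15 and candidate round trip times of [12, 20, 40] ms,
// low = 12 and high = 27, so the 12 ms and 20 ms servers are kept and the
// 40 ms server is dropped from selection.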
// filters
function primaryFilter(server) {
    return server.type === common_1.ServerType.RSPrimary;
}
function secondaryFilter(server) {
    return server.type === common_1.ServerType.RSSecondary;
}
function nearestFilter(server) {
    return server.type === common_1.ServerType.RSSecondary || server.type === common_1.ServerType.RSPrimary;
}
function knownFilter(server) {
    return server.type !== common_1.ServerType.Unknown;
}
function loadBalancerFilter(server) {
    return server.type === common_1.ServerType.LoadBalancer;
}
/**
 * Returns a function which selects servers based on a provided read preference
 *
 * @param readPreference - The read preference to select with
 */
function readPreferenceServerSelector(readPreference) {
    if (!readPreference.isValid()) {
        throw new error_1.MongoInvalidArgumentError('Invalid read preference specified');
    }
    return function readPreferenceServers(topologyDescription, servers, deprioritized = []) {
        if (topologyDescription.type === common_1.TopologyType.LoadBalanced) {
            return servers.filter(loadBalancerFilter);
        }
        if (topologyDescription.type === common_1.TopologyType.Unknown) {
            return [];
        }
        if (topologyDescription.type === common_1.TopologyType.Single) {
            return latencyWindowReducer(topologyDescription, servers.filter(knownFilter));
        }
        if (topologyDescription.type === common_1.TopologyType.Sharded) {
            const filtered = servers.filter(server => {
                return !deprioritized.includes(server);
            });
            const selectable = filtered.length > 0 ? filtered : deprioritized;
            return latencyWindowReducer(topologyDescription, selectable.filter(knownFilter));
        }
        const mode = readPreference.mode;
        if (mode === read_preference_1.ReadPreference.PRIMARY) {
            return servers.filter(primaryFilter);
        }
        if (mode === read_preference_1.ReadPreference.PRIMARY_PREFERRED) {
            const result = servers.filter(primaryFilter);
            if (result.length) {
                return result;
            }
        }
        const filter = mode === read_preference_1.ReadPreference.NEAREST ? nearestFilter : secondaryFilter;
        const selectedServers = latencyWindowReducer(topologyDescription, tagSetReducer(readPreference, maxStalenessReducer(readPreference, topologyDescription, servers.filter(filter))));
        if (mode === read_preference_1.ReadPreference.SECONDARY_PREFERRED && selectedServers.length === 0) {
            return servers.filter(primaryFilter);
        }
        return selectedServers;
    };
}
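// Illustrative sketch (not part of the driver): for replica-set topologies the
// reducers above compose as type filter -> maxStalenessReducer -> tagSetReducer
// -> latencyWindowReducer, so a server must survive every stage to be selectable.
//
//   const selector = readPreferenceServerSelector(read_preference_1.ReadPreference.nearest);
//   const eligible = selector(topologyDescription, serverDescriptions);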
//# sourceMappingURL=server_selection.js.map
1
node_modules/mongodb/lib/sdam/server_selection.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/sdam/server_selection.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
85
node_modules/mongodb/lib/sdam/server_selection_events.js
generated
vendored
Normal file
85
node_modules/mongodb/lib/sdam/server_selection_events.js
generated
vendored
Normal file
|
|
@ -0,0 +1,85 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.WaitingForSuitableServerEvent = exports.ServerSelectionSucceededEvent = exports.ServerSelectionFailedEvent = exports.ServerSelectionStartedEvent = exports.ServerSelectionEvent = void 0;
|
||||
const utils_1 = require(".././utils");
|
||||
const constants_1 = require("../constants");
|
||||
/**
|
||||
* The base export class for all logs published from server selection
|
||||
* @internal
|
||||
* @category Log Type
|
||||
*/
|
||||
class ServerSelectionEvent {
|
||||
/** @internal */
|
||||
constructor(selector, topologyDescription, operation) {
|
||||
this.selector = selector;
|
||||
this.operation = operation;
|
||||
this.topologyDescription = topologyDescription;
|
||||
}
|
||||
}
|
||||
exports.ServerSelectionEvent = ServerSelectionEvent;
|
||||
/**
|
||||
* An event published when server selection starts
|
||||
* @internal
|
||||
* @category Event
|
||||
*/
|
||||
class ServerSelectionStartedEvent extends ServerSelectionEvent {
|
||||
/** @internal */
|
||||
constructor(selector, topologyDescription, operation) {
|
||||
super(selector, topologyDescription, operation);
|
||||
/** @internal */
|
||||
this.name = constants_1.SERVER_SELECTION_STARTED;
|
||||
this.message = 'Server selection started';
|
||||
}
|
||||
}
|
||||
exports.ServerSelectionStartedEvent = ServerSelectionStartedEvent;
|
||||
/**
|
||||
* An event published when a server selection fails
|
||||
* @internal
|
||||
* @category Event
|
||||
*/
|
||||
class ServerSelectionFailedEvent extends ServerSelectionEvent {
|
||||
/** @internal */
|
||||
constructor(selector, topologyDescription, error, operation) {
|
||||
super(selector, topologyDescription, operation);
|
||||
/** @internal */
|
||||
this.name = constants_1.SERVER_SELECTION_FAILED;
|
||||
this.message = 'Server selection failed';
|
||||
this.failure = error;
|
||||
}
|
||||
}
|
||||
exports.ServerSelectionFailedEvent = ServerSelectionFailedEvent;
|
||||
/**
|
||||
* An event published when server selection succeeds
|
||||
* @internal
|
||||
* @category Event
|
||||
*/
|
||||
class ServerSelectionSucceededEvent extends ServerSelectionEvent {
|
||||
/** @internal */
|
||||
constructor(selector, topologyDescription, address, operation) {
|
||||
super(selector, topologyDescription, operation);
|
||||
/** @internal */
|
||||
this.name = constants_1.SERVER_SELECTION_SUCCEEDED;
|
||||
this.message = 'Server selection succeeded';
|
||||
const { host, port } = utils_1.HostAddress.fromString(address).toHostPort();
|
||||
this.serverHost = host;
|
||||
this.serverPort = port;
|
||||
}
|
||||
}
|
||||
exports.ServerSelectionSucceededEvent = ServerSelectionSucceededEvent;
|
||||
/**
|
||||
* An event published when server selection is waiting for a suitable server to become available
|
||||
* @internal
|
||||
* @category Event
|
||||
*/
|
||||
class WaitingForSuitableServerEvent extends ServerSelectionEvent {
|
||||
/** @internal */
|
||||
constructor(selector, topologyDescription, remainingTimeMS, operation) {
|
||||
super(selector, topologyDescription, operation);
|
||||
/** @internal */
|
||||
this.name = constants_1.WAITING_FOR_SUITABLE_SERVER;
|
||||
this.message = 'Waiting for suitable server to become available';
|
||||
this.remainingTimeMS = remainingTimeMS;
|
||||
}
|
||||
}
|
||||
exports.WaitingForSuitableServerEvent = WaitingForSuitableServerEvent;
|
||||
//# sourceMappingURL=server_selection_events.js.map
|
||||
1
node_modules/mongodb/lib/sdam/server_selection_events.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/sdam/server_selection_events.js.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"server_selection_events.js","sourceRoot":"","sources":["../../src/sdam/server_selection_events.ts"],"names":[],"mappings":";;;AAAA,sCAAyC;AACzC,4CAKsB;AAKtB;;;;GAIG;AACH,MAAsB,oBAAoB;IAmBxC,gBAAgB;IAChB,YACE,QAAkD,EAClD,mBAAwC,EACxC,SAAiB;QAEjB,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;QACzB,IAAI,CAAC,SAAS,GAAG,SAAS,CAAC;QAC3B,IAAI,CAAC,mBAAmB,GAAG,mBAAmB,CAAC;IACjD,CAAC;CACF;AA7BD,oDA6BC;AAED;;;;GAIG;AACH,MAAa,2BAA4B,SAAQ,oBAAoB;IAKnE,gBAAgB;IAChB,YACE,QAAkD,EAClD,mBAAwC,EACxC,SAAiB;QAEjB,KAAK,CAAC,QAAQ,EAAE,mBAAmB,EAAE,SAAS,CAAC,CAAC;QAVlD,gBAAgB;QAChB,SAAI,GAAG,oCAAwB,CAAC;QAChC,YAAO,GAAG,0BAA0B,CAAC;IASrC,CAAC;CACF;AAbD,kEAaC;AAED;;;;GAIG;AACH,MAAa,0BAA2B,SAAQ,oBAAoB;IAOlE,gBAAgB;IAChB,YACE,QAAkD,EAClD,mBAAwC,EACxC,KAAY,EACZ,SAAiB;QAEjB,KAAK,CAAC,QAAQ,EAAE,mBAAmB,EAAE,SAAS,CAAC,CAAC;QAblD,gBAAgB;QAChB,SAAI,GAAG,mCAAuB,CAAC;QAC/B,YAAO,GAAG,yBAAyB,CAAC;QAYlC,IAAI,CAAC,OAAO,GAAG,KAAK,CAAC;IACvB,CAAC;CACF;AAjBD,gEAiBC;AAED;;;;GAIG;AACH,MAAa,6BAA8B,SAAQ,oBAAoB;IASrE,gBAAgB;IAChB,YACE,QAAkD,EAClD,mBAAwC,EACxC,OAAe,EACf,SAAiB;QAEjB,KAAK,CAAC,QAAQ,EAAE,mBAAmB,EAAE,SAAS,CAAC,CAAC;QAflD,gBAAgB;QAChB,SAAI,GAAG,sCAA0B,CAAC;QAClC,YAAO,GAAG,4BAA4B,CAAC;QAcrC,MAAM,EAAE,IAAI,EAAE,IAAI,EAAE,GAAG,mBAAW,CAAC,UAAU,CAAC,OAAO,CAAC,CAAC,UAAU,EAAE,CAAC;QACpE,IAAI,CAAC,UAAU,GAAG,IAAI,CAAC;QACvB,IAAI,CAAC,UAAU,GAAG,IAAI,CAAC;IACzB,CAAC;CACF;AArBD,sEAqBC;AAED;;;;GAIG;AACH,MAAa,6BAA8B,SAAQ,oBAAoB;IAOrE,gBAAgB;IAChB,YACE,QAAkD,EAClD,mBAAwC,EACxC,eAAuB,EACvB,SAAiB;QAEjB,KAAK,CAAC,QAAQ,EAAE,mBAAmB,EAAE,SAAS,CAAC,CAAC;QAblD,gBAAgB;QAChB,SAAI,GAAG,uCAA2B,CAAC;QACnC,YAAO,GAAG,iDAAiD,CAAC;QAY1D,IAAI,CAAC,eAAe,GAAG,eAAe,CAAC;IACzC,CAAC;CACF;AAjBD,sEAiBC"}
|
||||
108
node_modules/mongodb/lib/sdam/srv_polling.js
generated
vendored
Normal file
108
node_modules/mongodb/lib/sdam/srv_polling.js
generated
vendored
Normal file
|
|
@ -0,0 +1,108 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.SrvPoller = exports.SrvPollingEvent = void 0;
|
||||
const dns = require("dns");
|
||||
const timers_1 = require("timers");
|
||||
const error_1 = require("../error");
|
||||
const mongo_types_1 = require("../mongo_types");
|
||||
const utils_1 = require("../utils");
|
||||
/**
|
||||
* @internal
|
||||
* @category Event
|
||||
*/
|
||||
class SrvPollingEvent {
|
||||
constructor(srvRecords) {
|
||||
this.srvRecords = srvRecords;
|
||||
}
|
||||
hostnames() {
|
||||
return new Set(this.srvRecords.map(r => utils_1.HostAddress.fromSrvRecord(r).toString()));
|
||||
}
|
||||
}
|
||||
exports.SrvPollingEvent = SrvPollingEvent;
|
||||
/** @internal */
|
||||
class SrvPoller extends mongo_types_1.TypedEventEmitter {
|
||||
/** @event */
|
||||
static { this.SRV_RECORD_DISCOVERY = 'srvRecordDiscovery'; }
|
||||
constructor(options) {
|
||||
super();
|
||||
this.on('error', utils_1.noop);
|
||||
if (!options || !options.srvHost) {
|
||||
throw new error_1.MongoRuntimeError('Options for SrvPoller must exist and include srvHost');
|
||||
}
|
||||
this.srvHost = options.srvHost;
|
||||
this.srvMaxHosts = options.srvMaxHosts ?? 0;
|
||||
this.srvServiceName = options.srvServiceName ?? 'mongodb';
|
||||
this.rescanSrvIntervalMS = 60000;
|
||||
this.heartbeatFrequencyMS = options.heartbeatFrequencyMS ?? 10000;
|
||||
this.haMode = false;
|
||||
this.generation = 0;
|
||||
this._timeout = undefined;
|
||||
}
|
||||
get srvAddress() {
|
||||
return `_${this.srvServiceName}._tcp.${this.srvHost}`;
|
||||
}
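    // Illustrative sketch (not part of the driver): for srvHost 'example.com'
    // and the default service name, srvAddress is '_mongodb._tcp.example.com',
    // the standard DNS SRV name queried for mongodb+srv:// connection strings.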
    get intervalMS() {
        return this.haMode ? this.heartbeatFrequencyMS : this.rescanSrvIntervalMS;
    }
    start() {
        if (!this._timeout) {
            this.schedule();
        }
    }
    stop() {
        if (this._timeout) {
            (0, timers_1.clearTimeout)(this._timeout);
            this.generation += 1;
            this._timeout = undefined;
        }
    }
    // TODO(NODE-4994): implement new logging logic for SrvPoller failures
    schedule() {
        if (this._timeout) {
            (0, timers_1.clearTimeout)(this._timeout);
        }
        this._timeout = (0, timers_1.setTimeout)(() => {
            this._poll().then(undefined, utils_1.squashError);
        }, this.intervalMS);
    }
    success(srvRecords) {
        this.haMode = false;
        this.schedule();
        this.emit(SrvPoller.SRV_RECORD_DISCOVERY, new SrvPollingEvent(srvRecords));
    }
    failure() {
        this.haMode = true;
        this.schedule();
    }
    async _poll() {
        const generation = this.generation;
        let srvRecords;
        try {
            srvRecords = await dns.promises.resolveSrv(this.srvAddress);
        }
        catch {
            this.failure();
            return;
        }
        // a generation bump (from stop()) means this poll was superseded; drop the result
        if (generation !== this.generation) {
            return;
        }
        const finalAddresses = [];
        for (const record of srvRecords) {
            try {
                (0, utils_1.checkParentDomainMatch)(record.name, this.srvHost);
                finalAddresses.push(record);
            }
            catch (error) {
                (0, utils_1.squashError)(error);
            }
        }
        if (!finalAddresses.length) {
            this.failure();
            return;
        }
        this.success(finalAddresses);
    }
}
exports.SrvPoller = SrvPoller;
//# sourceMappingURL=srv_polling.js.map
1
node_modules/mongodb/lib/sdam/srv_polling.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/sdam/srv_polling.js.map
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"srv_polling.js","sourceRoot":"","sources":["../../src/sdam/srv_polling.ts"],"names":[],"mappings":";;;AAAA,2BAA2B;AAC3B,mCAAkD;AAElD,oCAA6C;AAC7C,gDAAmD;AACnD,oCAAkF;AAElF;;;GAGG;AACH,MAAa,eAAe;IAE1B,YAAY,UAA2B;QACrC,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;IAC/B,CAAC;IAED,SAAS;QACP,OAAO,IAAI,GAAG,CAAC,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,mBAAW,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC;IACpF,CAAC;CACF;AATD,0CASC;AAeD,gBAAgB;AAChB,MAAa,SAAU,SAAQ,+BAAkC;IAU/D,aAAa;aACG,yBAAoB,GAAG,oBAA6B,CAAC;IAErE,YAAY,OAAyB;QACnC,KAAK,EAAE,CAAC;QACR,IAAI,CAAC,EAAE,CAAC,OAAO,EAAE,YAAI,CAAC,CAAC;QAEvB,IAAI,CAAC,OAAO,IAAI,CAAC,OAAO,CAAC,OAAO,EAAE,CAAC;YACjC,MAAM,IAAI,yBAAiB,CAAC,sDAAsD,CAAC,CAAC;QACtF,CAAC;QAED,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC;QAC/B,IAAI,CAAC,WAAW,GAAG,OAAO,CAAC,WAAW,IAAI,CAAC,CAAC;QAC5C,IAAI,CAAC,cAAc,GAAG,OAAO,CAAC,cAAc,IAAI,SAAS,CAAC;QAC1D,IAAI,CAAC,mBAAmB,GAAG,KAAK,CAAC;QACjC,IAAI,CAAC,oBAAoB,GAAG,OAAO,CAAC,oBAAoB,IAAI,KAAK,CAAC;QAElE,IAAI,CAAC,MAAM,GAAG,KAAK,CAAC;QACpB,IAAI,CAAC,UAAU,GAAG,CAAC,CAAC;QAEpB,IAAI,CAAC,QAAQ,GAAG,SAAS,CAAC;IAC5B,CAAC;IAED,IAAI,UAAU;QACZ,OAAO,IAAI,IAAI,CAAC,cAAc,SAAS,IAAI,CAAC,OAAO,EAAE,CAAC;IACxD,CAAC;IAED,IAAI,UAAU;QACZ,OAAO,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,oBAAoB,CAAC,CAAC,CAAC,IAAI,CAAC,mBAAmB,CAAC;IAC5E,CAAC;IAED,KAAK;QACH,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,CAAC;YACnB,IAAI,CAAC,QAAQ,EAAE,CAAC;QAClB,CAAC;IACH,CAAC;IAED,IAAI;QACF,IAAI,IAAI,CAAC,QAAQ,EAAE,CAAC;YAClB,IAAA,qBAAY,EAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;YAC5B,IAAI,CAAC,UAAU,IAAI,CAAC,CAAC;YACrB,IAAI,CAAC,QAAQ,GAAG,SAAS,CAAC;QAC5B,CAAC;IACH,CAAC;IAED,sEAAsE;IACtE,QAAQ;QACN,IAAI,IAAI,CAAC,QAAQ,EAAE,CAAC;YAClB,IAAA,qBAAY,EAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAC9B,CAAC;QAED,IAAI,CAAC,QAAQ,GAAG,IAAA,mBAAU,EAAC,GAAG,EAAE;YAC9B,IAAI,CAAC,KAAK,EAAE,CAAC,IAAI,CAAC,SAAS,EAAE,mBAAW,CAAC,CAAC;QAC5C,CAAC,EAAE,IAAI,CAAC,UAAU,CAAC,CAAC;IACtB,CAAC;IAED,OAAO,CAAC,UAA2B;QACjC,IAAI,CAAC,MAAM,GAAG,KAAK,CAAC;QACpB,IAAI,CAAC,QAAQ,EAAE,CAAC;QAChB,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,oBAAoB,EAAE,IAAI,eAAe,CAAC,UAAU,CAAC,CAAC,CAAC;IAC7E,CAAC;IAED,OAAO;QACL,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;QACnB,IAAI,CAAC,QAAQ,EAAE,CAAC;IAClB,CAAC;IAED,KAAK,CAAC,KAAK;QACT,MAAM,UAAU,GAAG,IAAI,CAAC,UAAU,CAAC;QACnC,IAAI,UAAU,CAAC;QAEf,IAAI,CAAC;YACH,UAAU,GAAG,MAAM,GAAG,CAAC,QAAQ,CAAC,UAAU,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;QAC9D,CAAC;QAAC,MAAM,CAAC;YACP,IAAI,CAAC,OAAO,EAAE,CAAC;YACf,OAAO;QACT,CAAC;QAED,IAAI,UAAU,KAAK,IAAI,CAAC,UAAU,EAAE,CAAC;YACnC,OAAO;QACT,CAAC;QAED,MAAM,cAAc,GAAoB,EAAE,CAAC;QAC3C,KAAK,MAAM,MAAM,IAAI,UAAU,EAAE,CAAC;YAChC,IAAI,CAAC;gBACH,IAAA,8BAAsB,EAAC,MAAM,CAAC,IAAI,EAAE,IAAI,CAAC,OAAO,CAAC,CAAC;gBAClD,cAAc,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;YAC9B,CAAC;YAAC,OAAO,KAAK,EAAE,CAAC;gBACf,IAAA,mBAAW,EAAC,KAAK,CAAC,CAAC;YACrB,CAAC;QACH,CAAC;QAED,IAAI,CAAC,cAAc,CAAC,MAAM,EAAE,CAAC;YAC3B,IAAI,CAAC,OAAO,EAAE,CAAC;YACf,OAAO;QACT,CAAC;QAED,IAAI,CAAC,OAAO,CAAC,cAAc,CAAC,CAAC;IAC/B,CAAC;;AA5GH,8BA6GC"}
|
||||
650
node_modules/mongodb/lib/sdam/topology.js
generated
vendored
Normal file
650
node_modules/mongodb/lib/sdam/topology.js
generated
vendored
Normal file
|
|
@ -0,0 +1,650 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.Topology = void 0;
|
||||
const connection_string_1 = require("../connection_string");
|
||||
const constants_1 = require("../constants");
|
||||
const error_1 = require("../error");
|
||||
const mongo_logger_1 = require("../mongo_logger");
|
||||
const mongo_types_1 = require("../mongo_types");
|
||||
const read_preference_1 = require("../read_preference");
|
||||
const timeout_1 = require("../timeout");
|
||||
const utils_1 = require("../utils");
|
||||
const common_1 = require("./common");
|
||||
const events_1 = require("./events");
|
||||
const server_1 = require("./server");
|
||||
const server_description_1 = require("./server_description");
|
||||
const server_selection_1 = require("./server_selection");
|
||||
const server_selection_events_1 = require("./server_selection_events");
|
||||
const srv_polling_1 = require("./srv_polling");
|
||||
const topology_description_1 = require("./topology_description");
|
||||
// Global state
|
||||
let globalTopologyCounter = 0;
|
||||
const stateTransition = (0, utils_1.makeStateMachine)({
|
||||
[common_1.STATE_CLOSED]: [common_1.STATE_CLOSED, common_1.STATE_CONNECTING],
|
||||
[common_1.STATE_CONNECTING]: [common_1.STATE_CONNECTING, common_1.STATE_CLOSING, common_1.STATE_CONNECTED, common_1.STATE_CLOSED],
|
||||
[common_1.STATE_CONNECTED]: [common_1.STATE_CONNECTED, common_1.STATE_CLOSING, common_1.STATE_CLOSED],
|
||||
[common_1.STATE_CLOSING]: [common_1.STATE_CLOSING, common_1.STATE_CLOSED]
|
||||
});
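// Illustrative note (not part of the driver): each key of the state machine maps
// to the set of states that may legally follow it; e.g. 'closed' may only remain
// 'closed' or move to 'connecting', so 'closed' -> 'connected' is not a legal
// transition.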
/**
 * A container of server instances representing a connection to a MongoDB topology.
 * @internal
 */
class Topology extends mongo_types_1.TypedEventEmitter {
    /** @event */
    static { this.SERVER_OPENING = constants_1.SERVER_OPENING; }
    /** @event */
    static { this.SERVER_CLOSED = constants_1.SERVER_CLOSED; }
    /** @event */
    static { this.SERVER_DESCRIPTION_CHANGED = constants_1.SERVER_DESCRIPTION_CHANGED; }
    /** @event */
    static { this.TOPOLOGY_OPENING = constants_1.TOPOLOGY_OPENING; }
    /** @event */
    static { this.TOPOLOGY_CLOSED = constants_1.TOPOLOGY_CLOSED; }
    /** @event */
    static { this.TOPOLOGY_DESCRIPTION_CHANGED = constants_1.TOPOLOGY_DESCRIPTION_CHANGED; }
    /** @event */
    static { this.ERROR = constants_1.ERROR; }
    /** @event */
    static { this.OPEN = constants_1.OPEN; }
    /** @event */
    static { this.CONNECT = constants_1.CONNECT; }
    /** @event */
    static { this.CLOSE = constants_1.CLOSE; }
    /** @event */
    static { this.TIMEOUT = constants_1.TIMEOUT; }
    /**
     * @param seedlist - a list of HostAddress instances to connect to
     */
    constructor(client, seeds, options) {
        super();
        this.on('error', utils_1.noop);
        this.client = client;
        // Options should only be undefined in tests; MongoClient will always have defined options
        options = options ?? {
            hosts: [utils_1.HostAddress.fromString('localhost:27017')],
            ...Object.fromEntries(connection_string_1.DEFAULT_OPTIONS.entries())
        };
        if (typeof seeds === 'string') {
            seeds = [utils_1.HostAddress.fromString(seeds)];
        }
        else if (!Array.isArray(seeds)) {
            seeds = [seeds];
        }
        const seedlist = [];
        for (const seed of seeds) {
            if (typeof seed === 'string') {
                seedlist.push(utils_1.HostAddress.fromString(seed));
            }
            else if (seed instanceof utils_1.HostAddress) {
                seedlist.push(seed);
            }
            else {
                // FIXME(NODE-3483): May need to be a MongoParseError
                throw new error_1.MongoRuntimeError(`Topology cannot be constructed from ${JSON.stringify(seed)}`);
            }
        }
        const topologyType = topologyTypeFromOptions(options);
        const topologyId = globalTopologyCounter++;
        const selectedHosts = options.srvMaxHosts == null ||
            options.srvMaxHosts === 0 ||
            options.srvMaxHosts >= seedlist.length
            ? seedlist
            : (0, utils_1.shuffle)(seedlist, options.srvMaxHosts);
        const serverDescriptions = new Map();
        for (const hostAddress of selectedHosts) {
            serverDescriptions.set(hostAddress.toString(), new server_description_1.ServerDescription(hostAddress));
        }
        this.waitQueue = new utils_1.List();
        this.s = {
            // the id of this topology
            id: topologyId,
            // passed in options
            options,
            // initial seedlist of servers to connect to
            seedlist,
            // initial state
            state: common_1.STATE_CLOSED,
            // the topology description
            description: new topology_description_1.TopologyDescription(topologyType, serverDescriptions, options.replicaSet, undefined, undefined, undefined, options),
            serverSelectionTimeoutMS: options.serverSelectionTimeoutMS,
            heartbeatFrequencyMS: options.heartbeatFrequencyMS,
            minHeartbeatFrequencyMS: options.minHeartbeatFrequencyMS,
            // a map of server instances to normalized addresses
            servers: new Map(),
            credentials: options?.credentials,
            clusterTime: undefined,
            detectShardedTopology: ev => this.detectShardedTopology(ev),
            detectSrvRecords: ev => this.detectSrvRecords(ev)
        };
        this.mongoLogger = client.mongoLogger;
        this.component = 'topology';
        if (options.srvHost && !options.loadBalanced) {
            this.s.srvPoller =
                options.srvPoller ??
                    new srv_polling_1.SrvPoller({
                        heartbeatFrequencyMS: this.s.heartbeatFrequencyMS,
                        srvHost: options.srvHost,
                        srvMaxHosts: options.srvMaxHosts,
                        srvServiceName: options.srvServiceName
                    });
            this.on(Topology.TOPOLOGY_DESCRIPTION_CHANGED, this.s.detectShardedTopology);
        }
        this.connectionLock = undefined;
    }
    detectShardedTopology(event) {
        const previousType = event.previousDescription.type;
        const newType = event.newDescription.type;
        const transitionToSharded = previousType !== common_1.TopologyType.Sharded && newType === common_1.TopologyType.Sharded;
        const srvListeners = this.s.srvPoller?.listeners(srv_polling_1.SrvPoller.SRV_RECORD_DISCOVERY);
        const listeningToSrvPolling = !!srvListeners?.includes(this.s.detectSrvRecords);
        if (transitionToSharded && !listeningToSrvPolling) {
            this.s.srvPoller?.on(srv_polling_1.SrvPoller.SRV_RECORD_DISCOVERY, this.s.detectSrvRecords);
            this.s.srvPoller?.start();
        }
    }
    detectSrvRecords(ev) {
        const previousTopologyDescription = this.s.description;
        this.s.description = this.s.description.updateFromSrvPollingEvent(ev, this.s.options.srvMaxHosts);
        if (this.s.description === previousTopologyDescription) {
            // Nothing changed, so return
            return;
        }
        updateServers(this);
        this.emitAndLog(Topology.TOPOLOGY_DESCRIPTION_CHANGED, new events_1.TopologyDescriptionChangedEvent(this.s.id, previousTopologyDescription, this.s.description));
    }
    /**
     * @returns A `TopologyDescription` for this topology
     */
    get description() {
        return this.s.description;
    }
    get loadBalanced() {
        return this.s.options.loadBalanced;
    }
    get serverApi() {
        return this.s.options.serverApi;
    }
    /** Initiate server connect */
    async connect(options) {
        this.connectionLock ??= this._connect(options);
        try {
            await this.connectionLock;
            return this;
        }
        finally {
            this.connectionLock = undefined;
        }
    }
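    // Illustrative note (not part of the driver): `connectionLock` deduplicates
    // concurrent connect() calls; parallel callers await the same in-flight
    // _connect() promise instead of racing separate handshakes.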
    async _connect(options) {
        options = options ?? {};
        if (this.s.state === common_1.STATE_CONNECTED) {
            return this;
        }
        stateTransition(this, common_1.STATE_CONNECTING);
        // emit SDAM monitoring events
        this.emitAndLog(Topology.TOPOLOGY_OPENING, new events_1.TopologyOpeningEvent(this.s.id));
        // emit an event for the topology change
        this.emitAndLog(Topology.TOPOLOGY_DESCRIPTION_CHANGED, new events_1.TopologyDescriptionChangedEvent(this.s.id, new topology_description_1.TopologyDescription(common_1.TopologyType.Unknown), // initial is always Unknown
        this.s.description));
        // connect all known servers, then attempt server selection to connect
        const serverDescriptions = Array.from(this.s.description.servers.values());
        this.s.servers = new Map(serverDescriptions.map(serverDescription => [
            serverDescription.address,
            createAndConnectServer(this, serverDescription)
        ]));
        // In load balancer mode we need to fake a server description getting
        // emitted from the monitor, since the monitor doesn't exist.
        if (this.s.options.loadBalanced) {
            for (const description of serverDescriptions) {
                const newDescription = new server_description_1.ServerDescription(description.hostAddress, undefined, {
                    loadBalanced: this.s.options.loadBalanced
                });
                this.serverUpdateHandler(newDescription);
            }
        }
        const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS;
        const readPreference = options.readPreference ?? read_preference_1.ReadPreference.primary;
        const timeoutContext = timeout_1.TimeoutContext.create({
            // TODO(NODE-6448): auto-connect ignores timeoutMS; potential future feature
            timeoutMS: undefined,
            serverSelectionTimeoutMS,
            waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS
        });
        const selectServerOptions = {
            operationName: 'handshake',
            ...options,
            timeoutContext
        };
        try {
            const server = await this.selectServer((0, server_selection_1.readPreferenceServerSelector)(readPreference), selectServerOptions);
            const skipPingOnConnect = this.s.options.__skipPingOnConnect === true;
            if (!skipPingOnConnect) {
                // check out (and immediately check in) one connection to verify connectivity
                const connection = await server.pool.checkOut({ timeoutContext });
                server.pool.checkIn(connection);
            }
            stateTransition(this, common_1.STATE_CONNECTED);
            this.emit(Topology.OPEN, this);
            this.emit(Topology.CONNECT, this);
            return this;
        }
        catch (error) {
            this.close();
            throw error;
        }
    }
    closeCheckedOutConnections() {
        for (const server of this.s.servers.values()) {
            server.closeCheckedOutConnections();
        }
    }
    /** Close this topology */
    close() {
        if (this.s.state === common_1.STATE_CLOSED || this.s.state === common_1.STATE_CLOSING) {
            return;
        }
        for (const server of this.s.servers.values()) {
            closeServer(server, this);
        }
        this.s.servers.clear();
        stateTransition(this, common_1.STATE_CLOSING);
        drainWaitQueue(this.waitQueue, new error_1.MongoTopologyClosedError());
        if (this.s.srvPoller) {
            this.s.srvPoller.stop();
            this.s.srvPoller.removeListener(srv_polling_1.SrvPoller.SRV_RECORD_DISCOVERY, this.s.detectSrvRecords);
        }
        this.removeListener(Topology.TOPOLOGY_DESCRIPTION_CHANGED, this.s.detectShardedTopology);
        stateTransition(this, common_1.STATE_CLOSED);
        // emit an event for close
        this.emitAndLog(Topology.TOPOLOGY_CLOSED, new events_1.TopologyClosedEvent(this.s.id));
    }
    /**
     * Selects a server according to the selection predicate provided
     *
     * @param selector - An optional selector to select servers by, defaults to a random selection within a latency window
     * @param options - Optional settings related to server selection
     * @returns An instance of a `Server` meeting the criteria of the predicate provided
     */
    async selectServer(selector, options) {
        let serverSelector;
        if (typeof selector !== 'function') {
            if (typeof selector === 'string') {
                serverSelector = (0, server_selection_1.readPreferenceServerSelector)(read_preference_1.ReadPreference.fromString(selector));
            }
            else {
                let readPreference;
                if (selector instanceof read_preference_1.ReadPreference) {
                    readPreference = selector;
                }
                else {
                    read_preference_1.ReadPreference.translate(options);
                    readPreference = options.readPreference || read_preference_1.ReadPreference.primary;
                }
                serverSelector = (0, server_selection_1.readPreferenceServerSelector)(readPreference);
            }
        }
        else {
            serverSelector = selector;
        }
        options = { serverSelectionTimeoutMS: this.s.serverSelectionTimeoutMS, ...options };
        if (this.client.mongoLogger?.willLog(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, mongo_logger_1.SeverityLevel.DEBUG)) {
            this.client.mongoLogger?.debug(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, new server_selection_events_1.ServerSelectionStartedEvent(selector, this.description, options.operationName));
        }
        let timeout;
        if (options.timeoutContext) {
            timeout = options.timeoutContext.serverSelectionTimeout;
        }
        else {
            timeout = timeout_1.Timeout.expires(options.serverSelectionTimeoutMS ?? 0);
        }
        const isSharded = this.description.type === common_1.TopologyType.Sharded;
        const session = options.session;
        const transaction = session && session.transaction;
        if (isSharded && transaction && transaction.server) {
            if (this.client.mongoLogger?.willLog(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, mongo_logger_1.SeverityLevel.DEBUG)) {
                this.client.mongoLogger?.debug(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, new server_selection_events_1.ServerSelectionSucceededEvent(selector, this.description, transaction.server.pool.address, options.operationName));
            }
            if (options.timeoutContext?.clearServerSelectionTimeout) {
                timeout?.clear();
            }
            return transaction.server;
        }
        const { promise: serverPromise, resolve, reject } = (0, utils_1.promiseWithResolvers)();
        const waitQueueMember = {
            serverSelector,
            topologyDescription: this.description,
            mongoLogger: this.client.mongoLogger,
            transaction,
            resolve,
            reject,
            cancelled: false,
            startTime: (0, utils_1.now)(),
            operationName: options.operationName,
            waitingLogged: false,
            previousServer: options.previousServer
        };
        const abortListener = (0, utils_1.addAbortListener)(options.signal, function () {
            waitQueueMember.cancelled = true;
            reject(this.reason);
        });
        this.waitQueue.push(waitQueueMember);
        processWaitQueue(this);
        try {
            timeout?.throwIfExpired();
            const server = await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise);
            if (options.timeoutContext?.csotEnabled() && server.description.minRoundTripTime !== 0) {
                options.timeoutContext.minRoundTripTime = server.description.minRoundTripTime;
            }
            return server;
        }
        catch (error) {
            if (timeout_1.TimeoutError.is(error)) {
                // Timeout
                waitQueueMember.cancelled = true;
                const timeoutError = new error_1.MongoServerSelectionError(`Server selection timed out after ${timeout?.duration} ms`, this.description);
                if (this.client.mongoLogger?.willLog(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, mongo_logger_1.SeverityLevel.DEBUG)) {
                    this.client.mongoLogger?.debug(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, new server_selection_events_1.ServerSelectionFailedEvent(selector, this.description, timeoutError, options.operationName));
                }
                if (options.timeoutContext?.csotEnabled()) {
                    throw new error_1.MongoOperationTimeoutError('Timed out during server selection', {
                        cause: timeoutError
                    });
                }
                throw timeoutError;
            }
            // Other server selection error
            throw error;
        }
        finally {
            abortListener?.[utils_1.kDispose]();
            if (options.timeoutContext?.clearServerSelectionTimeout) {
                timeout?.clear();
            }
        }
    }
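    // Illustrative sketch (not part of the driver): a string read preference is
    // accepted and normalized to a selector function internally.
    //
    //   const server = await topology.selectServer('primaryPreferred', { operationName: 'find' });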
    /**
     * Update the internal TopologyDescription with a ServerDescription
     *
     * @param serverDescription - The server to update in the internal list of server descriptions
     */
    serverUpdateHandler(serverDescription) {
        if (!this.s.description.hasServer(serverDescription.address)) {
            return;
        }
        // ignore this server update if it's from an outdated topologyVersion
        if (isStaleServerDescription(this.s.description, serverDescription)) {
            return;
        }
        // these will be used for monitoring events later
        const previousTopologyDescription = this.s.description;
        const previousServerDescription = this.s.description.servers.get(serverDescription.address);
        if (!previousServerDescription) {
            return;
        }
        // Driver Sessions Spec: "Whenever a driver receives a cluster time from
        // a server it MUST compare it to the current highest seen cluster time
        // for the deployment. If the new cluster time is higher than the
        // highest seen cluster time it MUST become the new highest seen cluster
        // time. Two cluster times are compared using only the BsonTimestamp
        // value of the clusterTime embedded field."
        const clusterTime = serverDescription.$clusterTime;
        if (clusterTime) {
            (0, common_1._advanceClusterTime)(this, clusterTime);
        }
        // If we already know all the information contained in this updated description, then
        // we don't need to emit SDAM events, but we still need to update the description in order
        // to keep client-tracked attributes like last update time and round trip time up to date
        const equalDescriptions = previousServerDescription && previousServerDescription.equals(serverDescription);
        // first update the TopologyDescription
        this.s.description = this.s.description.update(serverDescription);
        if (this.s.description.compatibilityError) {
            this.emit(Topology.ERROR, new error_1.MongoCompatibilityError(this.s.description.compatibilityError));
            return;
        }
        // emit monitoring events for this change
        if (!equalDescriptions) {
            const newDescription = this.s.description.servers.get(serverDescription.address);
            if (newDescription) {
                this.emit(Topology.SERVER_DESCRIPTION_CHANGED, new events_1.ServerDescriptionChangedEvent(this.s.id, serverDescription.address, previousServerDescription, newDescription));
            }
        }
        // update server list from updated descriptions
        updateServers(this, serverDescription);
        // attempt to resolve any outstanding server selection attempts
        if (this.waitQueue.length > 0) {
            processWaitQueue(this);
        }
        if (!equalDescriptions) {
            this.emitAndLog(Topology.TOPOLOGY_DESCRIPTION_CHANGED, new events_1.TopologyDescriptionChangedEvent(this.s.id, previousTopologyDescription, this.s.description));
        }
    }
    auth(credentials, callback) {
        if (typeof credentials === 'function')
            ((callback = credentials), (credentials = undefined));
        if (typeof callback === 'function')
            callback(undefined, true);
    }
    isConnected() {
        return this.s.state === common_1.STATE_CONNECTED;
    }
    isDestroyed() {
        return this.s.state === common_1.STATE_CLOSED;
    }
    // NOTE: There are many places in code where we explicitly check the last hello
    // to do feature support detection. This should be done another way, but for
    // now we will just return the first hello seen, which should suffice.
    lastHello() {
        const serverDescriptions = Array.from(this.description.servers.values());
        if (serverDescriptions.length === 0)
            return {};
        const sd = serverDescriptions.filter((sd) => sd.type !== common_1.ServerType.Unknown)[0];
        const result = sd || { maxWireVersion: this.description.commonWireVersion };
        return result;
    }
    get commonWireVersion() {
        return this.description.commonWireVersion;
    }
    get logicalSessionTimeoutMinutes() {
        return this.description.logicalSessionTimeoutMinutes;
    }
    get clusterTime() {
        return this.s.clusterTime;
    }
    set clusterTime(clusterTime) {
        this.s.clusterTime = clusterTime;
    }
}
exports.Topology = Topology;
/** Destroys a server, and removes all event listeners from the instance */
function closeServer(server, topology) {
    for (const event of constants_1.LOCAL_SERVER_EVENTS) {
        server.removeAllListeners(event);
    }
    server.close();
    topology.emitAndLog(Topology.SERVER_CLOSED, new events_1.ServerClosedEvent(topology.s.id, server.description.address));
    for (const event of constants_1.SERVER_RELAY_EVENTS) {
        server.removeAllListeners(event);
    }
}
/** Predicts the TopologyType from options */
function topologyTypeFromOptions(options) {
    if (options?.directConnection) {
        return common_1.TopologyType.Single;
    }
    if (options?.replicaSet) {
        return common_1.TopologyType.ReplicaSetNoPrimary;
    }
    if (options?.loadBalanced) {
        return common_1.TopologyType.LoadBalanced;
    }
    return common_1.TopologyType.Unknown;
}
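// Illustrative sketch (not part of the driver): option precedence is
// directConnection > replicaSet > loadBalanced.
//
//   topologyTypeFromOptions({ directConnection: true, replicaSet: 'rs0' }); // 'Single'
//   topologyTypeFromOptions({ replicaSet: 'rs0' });                        // 'ReplicaSetNoPrimary'
//   topologyTypeFromOptions({});                                           // 'Unknown'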
/**
 * Creates new server instances and attempts to connect them
 *
 * @param topology - The topology that this server belongs to
 * @param serverDescription - The description for the server to initialize and connect to
 */
function createAndConnectServer(topology, serverDescription) {
    topology.emitAndLog(Topology.SERVER_OPENING, new events_1.ServerOpeningEvent(topology.s.id, serverDescription.address));
    const server = new server_1.Server(topology, serverDescription, topology.s.options);
    for (const event of constants_1.SERVER_RELAY_EVENTS) {
        server.on(event, (e) => topology.emit(event, e));
    }
    server.on(server_1.Server.DESCRIPTION_RECEIVED, description => topology.serverUpdateHandler(description));
    server.connect();
    return server;
}
/**
 * @param topology - Topology to update.
 * @param incomingServerDescription - New server description.
 */
function updateServers(topology, incomingServerDescription) {
    // update the internal server's description
    if (incomingServerDescription && topology.s.servers.has(incomingServerDescription.address)) {
        const server = topology.s.servers.get(incomingServerDescription.address);
        if (server) {
            server.s.description = incomingServerDescription;
            if (incomingServerDescription.error instanceof error_1.MongoError &&
                incomingServerDescription.error.hasErrorLabel(error_1.MongoErrorLabel.ResetPool)) {
                const interruptInUseConnections = incomingServerDescription.error.hasErrorLabel(error_1.MongoErrorLabel.InterruptInUseConnections);
                server.pool.clear({ interruptInUseConnections });
            }
            else if (incomingServerDescription.error == null) {
                const newTopologyType = topology.s.description.type;
                const shouldMarkPoolReady = incomingServerDescription.isDataBearing ||
                    (incomingServerDescription.type !== common_1.ServerType.Unknown &&
                        newTopologyType === common_1.TopologyType.Single);
                if (shouldMarkPoolReady) {
                    server.pool.ready();
                }
            }
        }
    }
    // add new servers for all descriptions we currently don't know about locally
    for (const serverDescription of topology.description.servers.values()) {
        if (!topology.s.servers.has(serverDescription.address)) {
            const server = createAndConnectServer(topology, serverDescription);
            topology.s.servers.set(serverDescription.address, server);
        }
    }
    // for all servers no longer known, remove their descriptions and destroy their instances
    for (const entry of topology.s.servers) {
        const serverAddress = entry[0];
        if (topology.description.hasServer(serverAddress)) {
            continue;
        }
        if (!topology.s.servers.has(serverAddress)) {
            continue;
        }
        const server = topology.s.servers.get(serverAddress);
        topology.s.servers.delete(serverAddress);
        // prepare server for garbage collection
        if (server) {
            closeServer(server, topology);
        }
    }
}
function drainWaitQueue(queue, drainError) {
    while (queue.length) {
        const waitQueueMember = queue.shift();
        if (!waitQueueMember) {
            continue;
        }
        if (!waitQueueMember.cancelled) {
            if (waitQueueMember.mongoLogger?.willLog(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, mongo_logger_1.SeverityLevel.DEBUG)) {
                waitQueueMember.mongoLogger?.debug(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, new server_selection_events_1.ServerSelectionFailedEvent(waitQueueMember.serverSelector, waitQueueMember.topologyDescription, drainError, waitQueueMember.operationName));
            }
            waitQueueMember.reject(drainError);
        }
    }
}
function processWaitQueue(topology) {
    if (topology.s.state === common_1.STATE_CLOSED) {
        drainWaitQueue(topology.waitQueue, new error_1.MongoTopologyClosedError());
        return;
    }
    const isSharded = topology.description.type === common_1.TopologyType.Sharded;
    const serverDescriptions = Array.from(topology.description.servers.values());
    const membersToProcess = topology.waitQueue.length;
    for (let i = 0; i < membersToProcess; ++i) {
        const waitQueueMember = topology.waitQueue.shift();
        if (!waitQueueMember) {
            continue;
        }
        if (waitQueueMember.cancelled) {
            continue;
        }
        let selectedDescriptions;
        try {
            const serverSelector = waitQueueMember.serverSelector;
            const previousServer = waitQueueMember.previousServer;
            selectedDescriptions = serverSelector
                ? serverSelector(topology.description, serverDescriptions, previousServer ? [previousServer] : [])
                : serverDescriptions;
        }
        catch (selectorError) {
            if (topology.client.mongoLogger?.willLog(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, mongo_logger_1.SeverityLevel.DEBUG)) {
                topology.client.mongoLogger?.debug(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, new server_selection_events_1.ServerSelectionFailedEvent(waitQueueMember.serverSelector, topology.description, selectorError, waitQueueMember.operationName));
            }
            waitQueueMember.reject(selectorError);
            continue;
        }
        let selectedServer;
        if (selectedDescriptions.length === 0) {
            if (!waitQueueMember.waitingLogged) {
                if (topology.client.mongoLogger?.willLog(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, mongo_logger_1.SeverityLevel.INFORMATIONAL)) {
                    topology.client.mongoLogger?.info(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, new server_selection_events_1.WaitingForSuitableServerEvent(waitQueueMember.serverSelector, topology.description, topology.s.serverSelectionTimeoutMS !== 0
                        ? topology.s.serverSelectionTimeoutMS - ((0, utils_1.now)() - waitQueueMember.startTime)
                        : -1, waitQueueMember.operationName));
                }
                waitQueueMember.waitingLogged = true;
            }
            topology.waitQueue.push(waitQueueMember);
            continue;
        }
        else if (selectedDescriptions.length === 1) {
            selectedServer = topology.s.servers.get(selectedDescriptions[0].address);
        }
        else {
            // "power of two random choices": sample two eligible servers and take
            // the one with fewer in-flight operations
            const descriptions = (0, utils_1.shuffle)(selectedDescriptions, 2);
            const server1 = topology.s.servers.get(descriptions[0].address);
            const server2 = topology.s.servers.get(descriptions[1].address);
            selectedServer =
                server1 && server2 && server1.s.operationCount < server2.s.operationCount
                    ? server1
                    : server2;
        }
        if (!selectedServer) {
            const serverSelectionError = new error_1.MongoServerSelectionError('server selection returned a server description but the server was not found in the topology', topology.description);
            if (topology.client.mongoLogger?.willLog(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, mongo_logger_1.SeverityLevel.DEBUG)) {
                topology.client.mongoLogger?.debug(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, new server_selection_events_1.ServerSelectionFailedEvent(waitQueueMember.serverSelector, topology.description, serverSelectionError, waitQueueMember.operationName));
            }
            waitQueueMember.reject(serverSelectionError);
            return;
        }
        const transaction = waitQueueMember.transaction;
        if (isSharded && transaction && transaction.isActive && selectedServer) {
            transaction.pinServer(selectedServer);
        }
        if (topology.client.mongoLogger?.willLog(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, mongo_logger_1.SeverityLevel.DEBUG)) {
            topology.client.mongoLogger?.debug(mongo_logger_1.MongoLoggableComponent.SERVER_SELECTION, new server_selection_events_1.ServerSelectionSucceededEvent(waitQueueMember.serverSelector, waitQueueMember.topologyDescription, selectedServer.pool.address, waitQueueMember.operationName));
        }
        waitQueueMember.resolve(selectedServer);
    }
    if (topology.waitQueue.length > 0) {
        // ensure all server monitors attempt monitoring soon
        for (const [, server] of topology.s.servers) {
            process.nextTick(function scheduleServerCheck() {
                return server.requestCheck();
            });
        }
    }
}
function isStaleServerDescription(topologyDescription, incomingServerDescription) {
    const currentServerDescription = topologyDescription.servers.get(incomingServerDescription.address);
    const currentTopologyVersion = currentServerDescription?.topologyVersion;
    return ((0, server_description_1.compareTopologyVersion)(currentTopologyVersion, incomingServerDescription.topologyVersion) > 0);
}
//# sourceMappingURL=topology.js.map
1
node_modules/mongodb/lib/sdam/topology.js.map
generated
vendored
Normal file
1
node_modules/mongodb/lib/sdam/topology.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
383
node_modules/mongodb/lib/sdam/topology_description.js
generated
vendored
Normal file
383
node_modules/mongodb/lib/sdam/topology_description.js
generated
vendored
Normal file
|
|
@ -0,0 +1,383 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.TopologyDescription = void 0;
|
||||
const bson_1 = require("../bson");
|
||||
const WIRE_CONSTANTS = require("../cmap/wire_protocol/constants");
|
||||
const error_1 = require("../error");
|
||||
const utils_1 = require("../utils");
|
||||
const common_1 = require("./common");
|
||||
const server_description_1 = require("./server_description");
|
||||
// constants related to compatibility checks
|
||||
const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION;
|
||||
const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION;
|
||||
const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION;
|
||||
const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION;
|
||||
const MONGOS_OR_UNKNOWN = new Set([common_1.ServerType.Mongos, common_1.ServerType.Unknown]);
|
||||
const MONGOS_OR_STANDALONE = new Set([common_1.ServerType.Mongos, common_1.ServerType.Standalone]);
|
||||
const NON_PRIMARY_RS_MEMBERS = new Set([
|
||||
common_1.ServerType.RSSecondary,
|
||||
common_1.ServerType.RSArbiter,
|
||||
common_1.ServerType.RSOther
|
||||
]);
|
||||
/**
|
||||
* Representation of a deployment of servers
|
||||
* @public
|
||||
*/
|
||||
class TopologyDescription {
|
||||
/**
|
||||
* Create a TopologyDescription
|
||||
*/
|
||||
constructor(topologyType, serverDescriptions = null, setName = null, maxSetVersion = null, maxElectionId = null, commonWireVersion = null, options = null) {
|
||||
options = options ?? {};
|
||||
this.type = topologyType ?? common_1.TopologyType.Unknown;
|
||||
this.servers = serverDescriptions ?? new Map();
|
||||
this.stale = false;
|
||||
this.compatible = true;
|
||||
this.heartbeatFrequencyMS = options.heartbeatFrequencyMS ?? 0;
|
||||
this.localThresholdMS = options.localThresholdMS ?? 15;
|
||||
this.setName = setName ?? null;
|
||||
this.maxElectionId = maxElectionId ?? null;
|
||||
this.maxSetVersion = maxSetVersion ?? null;
|
||||
this.commonWireVersion = commonWireVersion ?? 0;
|
||||
// determine server compatibility
|
||||
for (const serverDescription of this.servers.values()) {
|
||||
// Load balancer mode is always compatible.
|
||||
if (serverDescription.type === common_1.ServerType.Unknown ||
|
||||
serverDescription.type === common_1.ServerType.LoadBalancer) {
|
||||
continue;
|
||||
}
|
||||
if (serverDescription.minWireVersion > MAX_SUPPORTED_WIRE_VERSION) {
|
||||
this.compatible = false;
|
||||
this.compatibilityError = `Server at ${serverDescription.address} requires wire version ${serverDescription.minWireVersion}, but this version of the driver only supports up to ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`;
|
||||
}
|
||||
if (serverDescription.maxWireVersion < MIN_SUPPORTED_WIRE_VERSION) {
|
||||
this.compatible = false;
|
||||
this.compatibilityError = `Server at ${serverDescription.address} reports wire version ${serverDescription.maxWireVersion}, but this version of the driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION}).`;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Whenever a client updates the TopologyDescription from a hello response, it MUST set
|
||||
// TopologyDescription.logicalSessionTimeoutMinutes to the smallest logicalSessionTimeoutMinutes
|
||||
// value among ServerDescriptions of all data-bearing server types. If any have a null
|
||||
// logicalSessionTimeoutMinutes, then TopologyDescription.logicalSessionTimeoutMinutes MUST be
|
||||
// set to null.
|
||||
this.logicalSessionTimeoutMinutes = null;
|
||||
for (const [, server] of this.servers) {
|
||||
if (server.isReadable) {
|
||||
if (server.logicalSessionTimeoutMinutes == null) {
|
||||
// If any of the servers have a null logicalSessionsTimeout, then the whole topology does
|
||||
this.logicalSessionTimeoutMinutes = null;
|
||||
break;
|
||||
}
|
||||
if (this.logicalSessionTimeoutMinutes == null) {
|
||||
// First server with a non null logicalSessionsTimeout
|
||||
this.logicalSessionTimeoutMinutes = server.logicalSessionTimeoutMinutes;
|
||||
continue;
|
||||
}
|
||||
// Always select the smaller of the:
|
||||
// current server logicalSessionsTimeout and the topologies logicalSessionsTimeout
|
||||
this.logicalSessionTimeoutMinutes = Math.min(this.logicalSessionTimeoutMinutes, server.logicalSessionTimeoutMinutes);
|
||||
}
|
||||
}
|
||||
}
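    // Illustrative sketch (not part of the driver): the loop above is a
    // null-propagating minimum over readable servers. Timeouts of [30, 20] yield 20,
    // while [30, null] yields null, disabling sessions for the whole topology.
    // `readableServers` below is a placeholder for the readable entries of `servers`:
    //
    //   let timeout = null;
    //   for (const s of readableServers) {
    //       if (s.logicalSessionTimeoutMinutes == null) { timeout = null; break; }
    //       timeout = timeout == null
    //           ? s.logicalSessionTimeoutMinutes
    //           : Math.min(timeout, s.logicalSessionTimeoutMinutes);
    //   }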
    /**
     * Returns a new TopologyDescription based on the SrvPollingEvent
     * @internal
     */
    updateFromSrvPollingEvent(ev, srvMaxHosts = 0) {
        /** The SRV addresses define the set of addresses we should be using */
        const incomingHostnames = ev.hostnames();
        const currentHostnames = new Set(this.servers.keys());
        const hostnamesToAdd = new Set(incomingHostnames);
        const hostnamesToRemove = new Set();
        for (const hostname of currentHostnames) {
            // filter hostnamesToAdd (made from incomingHostnames) down to what is *not* present in currentHostnames
            hostnamesToAdd.delete(hostname);
            if (!incomingHostnames.has(hostname)) {
                // If the SRV records no longer include this hostname, we have to stop using it
                hostnamesToRemove.add(hostname);
            }
        }
        if (hostnamesToAdd.size === 0 && hostnamesToRemove.size === 0) {
            // No new hosts to add and none to remove
            return this;
        }
        const serverDescriptions = new Map(this.servers);
        for (const removedHost of hostnamesToRemove) {
            serverDescriptions.delete(removedHost);
        }
        if (hostnamesToAdd.size > 0) {
            if (srvMaxHosts === 0) {
                // Add all!
                for (const hostToAdd of hostnamesToAdd) {
                    serverDescriptions.set(hostToAdd, new server_description_1.ServerDescription(hostToAdd));
                }
            }
            else if (serverDescriptions.size < srvMaxHosts) {
                // Add only the amount needed to get us back to srvMaxHosts
                const selectedHosts = (0, utils_1.shuffle)(hostnamesToAdd, srvMaxHosts - serverDescriptions.size);
                for (const selectedHostToAdd of selectedHosts) {
                    serverDescriptions.set(selectedHostToAdd, new server_description_1.ServerDescription(selectedHostToAdd));
                }
            }
        }
        return new TopologyDescription(this.type, serverDescriptions, this.setName, this.maxSetVersion, this.maxElectionId, this.commonWireVersion, { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS });
    }
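    // Illustrative sketch (not part of the driver): the SRV diff above in miniature.
    // With current hosts {a, b} and polled hosts {b, c}, the additions end as {c}
    // and the removals as {a} (host strings below are placeholders):
    //
    //   const current = new Set(['a:27017', 'b:27017']);
    //   const incoming = new Set(['b:27017', 'c:27017']);
    //   const toAdd = new Set([...incoming].filter(h => !current.has(h)));    // {'c:27017'}
    //   const toRemove = new Set([...current].filter(h => !incoming.has(h))); // {'a:27017'}
    //
    // When srvMaxHosts > 0, only enough shuffled additions to reach srvMaxHosts are kept.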
    /**
     * Returns a copy of this description updated with a given ServerDescription
     * @internal
     */
    update(serverDescription) {
        const address = serverDescription.address;
        // potentially mutated values
        let { type: topologyType, setName, maxSetVersion, maxElectionId, commonWireVersion } = this;
        const serverType = serverDescription.type;
        const serverDescriptions = new Map(this.servers);
        // update common wire version
        if (serverDescription.maxWireVersion !== 0) {
            if (commonWireVersion == null) {
                commonWireVersion = serverDescription.maxWireVersion;
            }
            else {
                commonWireVersion = Math.min(commonWireVersion, serverDescription.maxWireVersion);
            }
        }
        if (typeof serverDescription.setName === 'string' &&
            typeof setName === 'string' &&
            serverDescription.setName !== setName) {
            if (topologyType === common_1.TopologyType.Single) {
                // A "Single" topology with a setName mismatch is direct connection usage:
                // mark the server unknown, but do not remove it
                serverDescription = new server_description_1.ServerDescription(address);
            }
            else {
                serverDescriptions.delete(address);
            }
        }
        // update the actual server description
        serverDescriptions.set(address, serverDescription);
        if (topologyType === common_1.TopologyType.Single) {
            // once we are defined as single, that never changes
            return new TopologyDescription(common_1.TopologyType.Single, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS });
        }
        if (topologyType === common_1.TopologyType.Unknown) {
            if (serverType === common_1.ServerType.Standalone && this.servers.size !== 1) {
                serverDescriptions.delete(address);
            }
            else {
                topologyType = topologyTypeForServerType(serverType);
            }
        }
        if (topologyType === common_1.TopologyType.Sharded) {
            if (!MONGOS_OR_UNKNOWN.has(serverType)) {
                serverDescriptions.delete(address);
            }
        }
        if (topologyType === common_1.TopologyType.ReplicaSetNoPrimary) {
            if (MONGOS_OR_STANDALONE.has(serverType)) {
                serverDescriptions.delete(address);
            }
            if (serverType === common_1.ServerType.RSPrimary) {
                const result = updateRsFromPrimary(serverDescriptions, serverDescription, setName, maxSetVersion, maxElectionId);
                topologyType = result[0];
                setName = result[1];
                maxSetVersion = result[2];
                maxElectionId = result[3];
            }
            else if (NON_PRIMARY_RS_MEMBERS.has(serverType)) {
                const result = updateRsNoPrimaryFromMember(serverDescriptions, serverDescription, setName);
                topologyType = result[0];
                setName = result[1];
            }
        }
        if (topologyType === common_1.TopologyType.ReplicaSetWithPrimary) {
            if (MONGOS_OR_STANDALONE.has(serverType)) {
                serverDescriptions.delete(address);
                topologyType = checkHasPrimary(serverDescriptions);
            }
            else if (serverType === common_1.ServerType.RSPrimary) {
                const result = updateRsFromPrimary(serverDescriptions, serverDescription, setName, maxSetVersion, maxElectionId);
                topologyType = result[0];
                setName = result[1];
                maxSetVersion = result[2];
                maxElectionId = result[3];
            }
            else if (NON_PRIMARY_RS_MEMBERS.has(serverType)) {
                topologyType = updateRsWithPrimaryFromMember(serverDescriptions, serverDescription, setName);
            }
            else {
                topologyType = checkHasPrimary(serverDescriptions);
            }
        }
        return new TopologyDescription(topologyType, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS });
    }
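    // Illustrative sketch (not part of the driver): update() is a pure transition
    // function, so repeatedly applying it walks the SDAM state machine. Starting from
    // Unknown and applying a secondary's hello, then the primary's:
    //
    //   let desc = new TopologyDescription(common_1.TopologyType.Unknown, new Map());
    //   desc = desc.update(secondaryDescription); // -> ReplicaSetNoPrimary
    //   desc = desc.update(primaryDescription);   // -> ReplicaSetWithPrimary
    //
    // `secondaryDescription` and `primaryDescription` stand in for ServerDescription
    // instances of types RSSecondary and RSPrimary.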
    get error() {
        const descriptionsWithError = Array.from(this.servers.values()).filter((sd) => sd.error);
        if (descriptionsWithError.length > 0) {
            return descriptionsWithError[0].error;
        }
        return null;
    }
    /**
     * Determines if the topology description has any known servers
     */
    get hasKnownServers() {
        return Array.from(this.servers.values()).some((sd) => sd.type !== common_1.ServerType.Unknown);
    }
    /**
     * Determines if this topology description has a data-bearing server available.
     */
    get hasDataBearingServers() {
        return Array.from(this.servers.values()).some((sd) => sd.isDataBearing);
    }
    /**
     * Determines if the topology has a definition for the provided address
     * @internal
     */
    hasServer(address) {
        return this.servers.has(address);
    }
    /**
     * Returns a JSON-serializable representation of the TopologyDescription. This is primarily
     * intended for use with JSON.stringify().
     *
     * This method will not throw.
     */
    toJSON() {
        return bson_1.EJSON.serialize(this);
    }
}
exports.TopologyDescription = TopologyDescription;
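// Illustrative sketch (not part of the driver): building a minimal description and
// taking an EJSON-serializable snapshot via toJSON(). Only the constructor and the
// ServerDescription required above are used; the address string is a placeholder.
function exampleDescribeSingleSeed() {
    const address = 'localhost:27017';
    const servers = new Map([[address, new server_description_1.ServerDescription(address)]]);
    const description = new TopologyDescription(common_1.TopologyType.Unknown, servers);
    return description.toJSON(); // safe to pass to JSON.stringify for logging
}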
function topologyTypeForServerType(serverType) {
    switch (serverType) {
        case common_1.ServerType.Standalone:
            return common_1.TopologyType.Single;
        case common_1.ServerType.Mongos:
            return common_1.TopologyType.Sharded;
        case common_1.ServerType.RSPrimary:
            return common_1.TopologyType.ReplicaSetWithPrimary;
        case common_1.ServerType.RSOther:
        case common_1.ServerType.RSSecondary:
            return common_1.TopologyType.ReplicaSetNoPrimary;
        default:
            return common_1.TopologyType.Unknown;
    }
}
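// Illustrative sketch (not part of the driver): the mapping above seeds the first
// transition out of Unknown. A secondary's hello alone can only establish
// ReplicaSetNoPrimary; a later RSPrimary response upgrades the topology via update().
function exampleFirstTransitionIsNoPrimary() {
    return (topologyTypeForServerType(common_1.ServerType.RSSecondary) ===
        common_1.TopologyType.ReplicaSetNoPrimary); // true
}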
function updateRsFromPrimary(serverDescriptions, serverDescription, setName = null, maxSetVersion = null, maxElectionId = null) {
    const setVersionElectionIdMismatch = (serverDescription, maxSetVersion, maxElectionId) => {
        return (`primary marked stale due to electionId/setVersion mismatch:` +
            ` server setVersion: ${serverDescription.setVersion},` +
            ` server electionId: ${serverDescription.electionId},` +
            ` topology setVersion: ${maxSetVersion},` +
            ` topology electionId: ${maxElectionId}`);
    };
    setName = setName || serverDescription.setName;
    if (setName !== serverDescription.setName) {
        serverDescriptions.delete(serverDescription.address);
        return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
    }
    if (serverDescription.maxWireVersion >= 17) {
        const electionIdComparison = (0, utils_1.compareObjectId)(maxElectionId, serverDescription.electionId);
        const maxElectionIdIsEqual = electionIdComparison === 0;
        const maxElectionIdIsLess = electionIdComparison === -1;
        const maxSetVersionIsLessOrEqual = (maxSetVersion ?? -1) <= (serverDescription.setVersion ?? -1);
        if (maxElectionIdIsLess || (maxElectionIdIsEqual && maxSetVersionIsLessOrEqual)) {
            // The reported electionId was greater, or the electionId was equal and the
            // reported setVersion was greater or equal.
            // Always update both values; they are a tuple.
            maxElectionId = serverDescription.electionId;
            maxSetVersion = serverDescription.setVersion;
        }
        else {
            // Stale primary:
            // replace serverDescription with a default ServerDescription of type "Unknown"
            serverDescriptions.set(serverDescription.address, new server_description_1.ServerDescription(serverDescription.address, undefined, {
                error: new error_1.MongoStalePrimaryError(setVersionElectionIdMismatch(serverDescription, maxSetVersion, maxElectionId))
            }));
            return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
        }
    }
    else {
        const electionId = serverDescription.electionId ? serverDescription.electionId : null;
        if (serverDescription.setVersion && electionId) {
            if (maxSetVersion && maxElectionId) {
                if (maxSetVersion > serverDescription.setVersion ||
                    (0, utils_1.compareObjectId)(maxElectionId, electionId) > 0) {
                    // this primary is stale, we must remove it
                    serverDescriptions.set(serverDescription.address, new server_description_1.ServerDescription(serverDescription.address, undefined, {
                        error: new error_1.MongoStalePrimaryError(setVersionElectionIdMismatch(serverDescription, maxSetVersion, maxElectionId))
                    }));
                    return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
                }
            }
            maxElectionId = serverDescription.electionId;
        }
        if (serverDescription.setVersion != null &&
            (maxSetVersion == null || serverDescription.setVersion > maxSetVersion)) {
            maxSetVersion = serverDescription.setVersion;
        }
    }
    // We've heard from the primary. Is it the same primary as before?
    for (const [address, server] of serverDescriptions) {
        if (server.type === common_1.ServerType.RSPrimary && server.address !== serverDescription.address) {
            // Reset the old primary's type to Unknown.
            serverDescriptions.set(address, new server_description_1.ServerDescription(server.address, undefined, {
                error: new error_1.MongoStalePrimaryError('primary marked stale due to discovery of newer primary')
            }));
            // There can only be one primary
            break;
        }
    }
    // Discover new hosts from this primary's response.
    serverDescription.allHosts.forEach((address) => {
        if (!serverDescriptions.has(address)) {
            serverDescriptions.set(address, new server_description_1.ServerDescription(address));
        }
    });
    // Remove hosts not in the response.
    const currentAddresses = Array.from(serverDescriptions.keys());
    const responseAddresses = serverDescription.allHosts;
    currentAddresses
        .filter((addr) => responseAddresses.indexOf(addr) === -1)
        .forEach((address) => {
        serverDescriptions.delete(address);
    });
    return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
}
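// Illustrative sketch (not part of the driver): for maxWireVersion >= 17 the logic
// above orders primaries by the (electionId, setVersion) tuple. `incoming` is a
// placeholder object carrying the same fields as a ServerDescription; compareObjectId
// is the same helper used above.
function exampleIsIncomingPrimaryStale(maxElectionId, maxSetVersion, incoming) {
    const cmp = (0, utils_1.compareObjectId)(maxElectionId, incoming.electionId);
    const accepted = cmp === -1 || (cmp === 0 && (maxSetVersion ?? -1) <= (incoming.setVersion ?? -1));
    // A stale primary is replaced with an Unknown ServerDescription carrying a
    // MongoStalePrimaryError, exactly as updateRsFromPrimary does above.
    return !accepted;
}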
function updateRsWithPrimaryFromMember(serverDescriptions, serverDescription, setName = null) {
    if (setName == null) {
        // TODO(NODE-3483): should be an appropriate runtime error
        throw new error_1.MongoRuntimeError('Argument "setName" is required if connected to a replica set');
    }
    if (setName !== serverDescription.setName ||
        (serverDescription.me && serverDescription.address !== serverDescription.me)) {
        serverDescriptions.delete(serverDescription.address);
    }
    return checkHasPrimary(serverDescriptions);
}
function updateRsNoPrimaryFromMember(serverDescriptions, serverDescription, setName = null) {
    const topologyType = common_1.TopologyType.ReplicaSetNoPrimary;
    setName = setName ?? serverDescription.setName;
    if (setName !== serverDescription.setName) {
        serverDescriptions.delete(serverDescription.address);
        return [topologyType, setName];
    }
    serverDescription.allHosts.forEach((address) => {
        if (!serverDescriptions.has(address)) {
            serverDescriptions.set(address, new server_description_1.ServerDescription(address));
        }
    });
    if (serverDescription.me && serverDescription.address !== serverDescription.me) {
        serverDescriptions.delete(serverDescription.address);
    }
    return [topologyType, setName];
}
function checkHasPrimary(serverDescriptions) {
    for (const serverDescription of serverDescriptions.values()) {
        if (serverDescription.type === common_1.ServerType.RSPrimary) {
            return common_1.TopologyType.ReplicaSetWithPrimary;
        }
    }
    return common_1.TopologyType.ReplicaSetNoPrimary;
}
//# sourceMappingURL=topology_description.js.map
1
node_modules/mongodb/lib/sdam/topology_description.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long